Nov 25 21:30:43 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 25 21:30:43 crc restorecon[4683]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c108,c511 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to 
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc 
restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc 
restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 21:30:43 
crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 
21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
[several hundred further restorecon relabeling entries elided; each reports a file under /var/lib/kubelet/pods/ as "not reset as customized by admin" to its existing system_u:object_r:container_file_t:s0 context with per-pod MCS category pairs. Pods covered: 87cf06ed-a83f-41a7-828d-70653580a8cb (dns: config-volume Corefile, etc-hosts, dns and kube-rbac-proxy containers); 44663579-783b-4372-86d6-acf235a62d72 (dns-node-resolver container); 9d4552c7-cd75-42dd-8880-30dd377c49a4 (console-operator: config and trusted-ca configmaps, etc-hosts, console-operator containers); 1bf7eb37-55a3-4c65-b768-a94c82151e69 (openshift-apiserver: etcd-serving-ca, config, audit, image-import-ca and trusted-ca-bundle configmaps, fix-audit-permissions, openshift-apiserver and check-endpoints containers); 308be0ea-9f5f-4b29-aeb1-5abd31a0b17b (packageserver: k8s-webhook-server serving-certs emptyDir, etc-hosts, packageserver containers); 0b78653f-4ff9-4508-8672-245ed9b561e3 (cluster-version-operator: service-ca configmap, etc-hosts, cluster-version-operator containers); 8f668bae-612b-4b75-9490-919e737c6a3b (image-registry: trusted-ca and registry-certificates configmaps plus the extracted CA trust store under kubernetes.io~empty-dir/ca-trust-extracted, including the pem/directory-hash bundle of public root CA certificates and their hash-named .0 links).]
Nov 25 21:30:43 crc restorecon[4683]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:43 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 
21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc 
restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:44 crc restorecon[4683]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 25 21:30:44 crc restorecon[4683]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0
Nov 25 21:30:44 crc kubenswrapper[4910]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 21:30:44 crc kubenswrapper[4910]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version.
Nov 25 21:30:44 crc kubenswrapper[4910]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 21:30:44 crc kubenswrapper[4910]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 25 21:30:44 crc kubenswrapper[4910]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 25 21:30:44 crc kubenswrapper[4910]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.928827 4910 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931533 4910 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931550 4910 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931554 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931560 4910 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931566 4910 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931571 4910 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931575 4910 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931579 4910 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931584 4910 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931589 4910 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931593 4910 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931598 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931603 4910 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931607 4910 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931611 4910 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931615 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931619 4910 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931622 4910 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931626 4910 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931631 4910 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 
21:30:44.931634 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931638 4910 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931648 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931652 4910 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931656 4910 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931660 4910 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931665 4910 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931670 4910 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931674 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931678 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931683 4910 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931687 4910 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931691 4910 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931697 4910 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931703 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931707 4910 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931710 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931714 4910 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931718 4910 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931722 4910 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931726 4910 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931730 4910 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931733 4910 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931737 4910 feature_gate.go:330] unrecognized feature gate: Example Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931741 4910 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931745 4910 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931750 4910 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
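[Note] The long run of "unrecognized feature gate" warnings here and below is expected on this stack: the wrapper passes OpenShift's full gate set, while the upstream kubelet only knows upstream Kubernetes gates, so it warns once per unknown name and still honors the known ones (including GA or deprecated gates such as CloudDualStackNodeIPs and KMSv1, which it sets with a removal notice). A rough behavioral sketch of that filtering; the three known gates and their stages are illustrative stand-ins, not the kubelet's real table:

import logging

logging.basicConfig(level=logging.INFO, format="%(levelname).1s %(message)s")
log = logging.getLogger("feature_gate")

# Illustrative stand-in for the kubelet's known-gate table (not the real one).
KNOWN = {"CloudDualStackNodeIPs": "GA", "KMSv1": "deprecated", "NodeSwap": "Beta"}

def merge(requested: dict[str, bool]) -> dict[str, bool]:
    """Apply requested gates, warning on unknown names as feature_gate.go does."""
    enabled = {}
    for name, value in requested.items():
        if name not in KNOWN:
            log.warning("unrecognized feature gate: %s", name)
            continue
        if KNOWN[name] in ("GA", "deprecated") and value:
            log.warning("Setting %s feature gate %s=true. It will be removed "
                        "in a future release.", KNOWN[name], name)
        enabled[name] = value
    return enabled

print(merge({"CloudDualStackNodeIPs": True, "KMSv1": True, "GatewayAPI": True}))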
Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931755 4910 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931759 4910 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931762 4910 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931766 4910 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931770 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931773 4910 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931778 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931781 4910 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931785 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931788 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931792 4910 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931795 4910 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931799 4910 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931803 4910 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931806 4910 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931811 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931815 4910 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931818 4910 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931822 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931825 4910 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931829 4910 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931833 4910 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931837 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.931841 4910 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931924 4910 flags.go:64] FLAG: --address="0.0.0.0" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931932 4910 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Nov 25 21:30:44 crc 
kubenswrapper[4910]: I1125 21:30:44.931941 4910 flags.go:64] FLAG: --anonymous-auth="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931947 4910 flags.go:64] FLAG: --application-metrics-count-limit="100" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931953 4910 flags.go:64] FLAG: --authentication-token-webhook="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931958 4910 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931965 4910 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931970 4910 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931975 4910 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931979 4910 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931984 4910 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931988 4910 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931992 4910 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.931996 4910 flags.go:64] FLAG: --cgroup-root="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932000 4910 flags.go:64] FLAG: --cgroups-per-qos="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932004 4910 flags.go:64] FLAG: --client-ca-file="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932008 4910 flags.go:64] FLAG: --cloud-config="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932013 4910 flags.go:64] FLAG: --cloud-provider="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932017 4910 flags.go:64] FLAG: --cluster-dns="[]" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932023 4910 flags.go:64] FLAG: --cluster-domain="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932027 4910 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932032 4910 flags.go:64] FLAG: --config-dir="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932036 4910 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932040 4910 flags.go:64] FLAG: --container-log-max-files="5" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932047 4910 flags.go:64] FLAG: --container-log-max-size="10Mi" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932052 4910 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932057 4910 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932061 4910 flags.go:64] FLAG: --containerd-namespace="k8s.io" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932065 4910 flags.go:64] FLAG: --contention-profiling="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932069 4910 flags.go:64] FLAG: --cpu-cfs-quota="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932074 4910 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932079 4910 
flags.go:64] FLAG: --cpu-manager-policy="none" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932083 4910 flags.go:64] FLAG: --cpu-manager-policy-options="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932089 4910 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932093 4910 flags.go:64] FLAG: --enable-controller-attach-detach="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932097 4910 flags.go:64] FLAG: --enable-debugging-handlers="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932101 4910 flags.go:64] FLAG: --enable-load-reader="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932105 4910 flags.go:64] FLAG: --enable-server="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932110 4910 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932117 4910 flags.go:64] FLAG: --event-burst="100" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932122 4910 flags.go:64] FLAG: --event-qps="50" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932126 4910 flags.go:64] FLAG: --event-storage-age-limit="default=0" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932130 4910 flags.go:64] FLAG: --event-storage-event-limit="default=0" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932134 4910 flags.go:64] FLAG: --eviction-hard="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932140 4910 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932144 4910 flags.go:64] FLAG: --eviction-minimum-reclaim="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932148 4910 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932152 4910 flags.go:64] FLAG: --eviction-soft="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932156 4910 flags.go:64] FLAG: --eviction-soft-grace-period="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932160 4910 flags.go:64] FLAG: --exit-on-lock-contention="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932164 4910 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932168 4910 flags.go:64] FLAG: --experimental-mounter-path="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932173 4910 flags.go:64] FLAG: --fail-cgroupv1="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932177 4910 flags.go:64] FLAG: --fail-swap-on="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932181 4910 flags.go:64] FLAG: --feature-gates="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932186 4910 flags.go:64] FLAG: --file-check-frequency="20s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932190 4910 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932195 4910 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932200 4910 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932204 4910 flags.go:64] FLAG: --healthz-port="10248" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932208 4910 flags.go:64] FLAG: --help="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932212 4910 flags.go:64] FLAG: 
--hostname-override="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932216 4910 flags.go:64] FLAG: --housekeeping-interval="10s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932220 4910 flags.go:64] FLAG: --http-check-frequency="20s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932224 4910 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932228 4910 flags.go:64] FLAG: --image-credential-provider-config="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932232 4910 flags.go:64] FLAG: --image-gc-high-threshold="85" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932236 4910 flags.go:64] FLAG: --image-gc-low-threshold="80" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932256 4910 flags.go:64] FLAG: --image-service-endpoint="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932260 4910 flags.go:64] FLAG: --kernel-memcg-notification="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932264 4910 flags.go:64] FLAG: --kube-api-burst="100" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932268 4910 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932272 4910 flags.go:64] FLAG: --kube-api-qps="50" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932276 4910 flags.go:64] FLAG: --kube-reserved="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932281 4910 flags.go:64] FLAG: --kube-reserved-cgroup="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932285 4910 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932289 4910 flags.go:64] FLAG: --kubelet-cgroups="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932295 4910 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932299 4910 flags.go:64] FLAG: --lock-file="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932303 4910 flags.go:64] FLAG: --log-cadvisor-usage="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932308 4910 flags.go:64] FLAG: --log-flush-frequency="5s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932312 4910 flags.go:64] FLAG: --log-json-info-buffer-size="0" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932319 4910 flags.go:64] FLAG: --log-json-split-stream="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932323 4910 flags.go:64] FLAG: --log-text-info-buffer-size="0" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932327 4910 flags.go:64] FLAG: --log-text-split-stream="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932332 4910 flags.go:64] FLAG: --logging-format="text" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932336 4910 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932341 4910 flags.go:64] FLAG: --make-iptables-util-chains="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932345 4910 flags.go:64] FLAG: --manifest-url="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932352 4910 flags.go:64] FLAG: --manifest-url-header="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932361 4910 flags.go:64] FLAG: --max-housekeeping-interval="15s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932365 4910 flags.go:64] FLAG: 
--max-open-files="1000000" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932370 4910 flags.go:64] FLAG: --max-pods="110" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932375 4910 flags.go:64] FLAG: --maximum-dead-containers="-1" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932380 4910 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932383 4910 flags.go:64] FLAG: --memory-manager-policy="None" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932411 4910 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932416 4910 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932421 4910 flags.go:64] FLAG: --node-ip="192.168.126.11" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932425 4910 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932436 4910 flags.go:64] FLAG: --node-status-max-images="50" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932441 4910 flags.go:64] FLAG: --node-status-update-frequency="10s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932446 4910 flags.go:64] FLAG: --oom-score-adj="-999" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932450 4910 flags.go:64] FLAG: --pod-cidr="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932455 4910 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932463 4910 flags.go:64] FLAG: --pod-manifest-path="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932468 4910 flags.go:64] FLAG: --pod-max-pids="-1" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932472 4910 flags.go:64] FLAG: --pods-per-core="0" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932477 4910 flags.go:64] FLAG: --port="10250" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932481 4910 flags.go:64] FLAG: --protect-kernel-defaults="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932485 4910 flags.go:64] FLAG: --provider-id="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932489 4910 flags.go:64] FLAG: --qos-reserved="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932494 4910 flags.go:64] FLAG: --read-only-port="10255" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932499 4910 flags.go:64] FLAG: --register-node="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932503 4910 flags.go:64] FLAG: --register-schedulable="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932507 4910 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932514 4910 flags.go:64] FLAG: --registry-burst="10" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932519 4910 flags.go:64] FLAG: --registry-qps="5" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932523 4910 flags.go:64] FLAG: --reserved-cpus="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932527 4910 flags.go:64] FLAG: --reserved-memory="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932532 4910 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" 
Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932537 4910 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932542 4910 flags.go:64] FLAG: --rotate-certificates="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932546 4910 flags.go:64] FLAG: --rotate-server-certificates="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932550 4910 flags.go:64] FLAG: --runonce="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932554 4910 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932559 4910 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932563 4910 flags.go:64] FLAG: --seccomp-default="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932567 4910 flags.go:64] FLAG: --serialize-image-pulls="true" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932571 4910 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932576 4910 flags.go:64] FLAG: --storage-driver-db="cadvisor" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932580 4910 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932584 4910 flags.go:64] FLAG: --storage-driver-password="root" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932588 4910 flags.go:64] FLAG: --storage-driver-secure="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932593 4910 flags.go:64] FLAG: --storage-driver-table="stats" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932597 4910 flags.go:64] FLAG: --storage-driver-user="root" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932601 4910 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932605 4910 flags.go:64] FLAG: --sync-frequency="1m0s" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932610 4910 flags.go:64] FLAG: --system-cgroups="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932614 4910 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932620 4910 flags.go:64] FLAG: --system-reserved-cgroup="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932624 4910 flags.go:64] FLAG: --tls-cert-file="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932628 4910 flags.go:64] FLAG: --tls-cipher-suites="[]" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932633 4910 flags.go:64] FLAG: --tls-min-version="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932637 4910 flags.go:64] FLAG: --tls-private-key-file="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932641 4910 flags.go:64] FLAG: --topology-manager-policy="none" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932645 4910 flags.go:64] FLAG: --topology-manager-policy-options="" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932649 4910 flags.go:64] FLAG: --topology-manager-scope="container" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932655 4910 flags.go:64] FLAG: --v="2" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932661 4910 flags.go:64] FLAG: --version="false" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932667 4910 flags.go:64] FLAG: --vmodule="" Nov 25 21:30:44 crc 
kubenswrapper[4910]: I1125 21:30:44.932673 4910 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.932677 4910 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932790 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932794 4910 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932798 4910 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932802 4910 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932806 4910 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932810 4910 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932813 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932817 4910 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932821 4910 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932824 4910 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932828 4910 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932831 4910 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932835 4910 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932839 4910 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932844 4910 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932848 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932852 4910 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932855 4910 feature_gate.go:330] unrecognized feature gate: Example Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932860 4910 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
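[Note] The flags.go:64 block that just ended is the kubelet echoing its effective command line, one FLAG: --name="value" pair per entry, which makes the dump easy to turn into a dictionary, e.g. to diff the startup flags of two nodes. A small sketch, assuming values never contain an unescaped double quote (true of every entry above):

import re

FLAG = re.compile(r'FLAG: (--[A-Za-z0-9-]+)="(.*?)"')

def flag_dump(log_text: str) -> dict[str, str]:
    """Map each echoed kubelet flag to its string-rendered value."""
    return dict(FLAG.findall(log_text))

sample = 'I1125 21:30:44.932425 4910 flags.go:64] FLAG: --node-ip="192.168.126.11"'
assert flag_dump(sample) == {"--node-ip": "192.168.126.11"}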
Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932864 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932869 4910 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932873 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932877 4910 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932880 4910 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932885 4910 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932889 4910 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932893 4910 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932897 4910 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932902 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932906 4910 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932910 4910 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932918 4910 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932923 4910 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932927 4910 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932931 4910 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932935 4910 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932939 4910 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932943 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932946 4910 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932950 4910 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932954 4910 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932957 4910 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932961 4910 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932964 4910 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932968 4910 
feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932972 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932977 4910 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932982 4910 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932986 4910 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932990 4910 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932994 4910 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.932998 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933002 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933006 4910 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933009 4910 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933013 4910 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933016 4910 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933020 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933024 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933027 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933031 4910 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933034 4910 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933038 4910 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933041 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933045 4910 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933048 4910 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933052 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933056 4910 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933059 4910 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933063 4910 feature_gate.go:330] 
unrecognized feature gate: GatewayAPI Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.933066 4910 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.933073 4910 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.947390 4910 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.947456 4910 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947589 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947604 4910 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947614 4910 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947625 4910 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947635 4910 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947643 4910 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947651 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947661 4910 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947670 4910 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947680 4910 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
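[Note] The feature gates: {map[...]} summary above is Go's fmt rendering of a map[string]bool, so the effective gate set can be recovered mechanically from that single line. A sketch that parses it back into a Python dict, assuming gate names contain no spaces or colons:

import re

PAIR = re.compile(r"(\w+):(true|false)")

def parse_gates(line: str) -> dict[str, bool]:
    """Parse a 'feature gates: {map[Name:true ...]}' log line into a dict."""
    body = line.split("feature gates: {map[", 1)[1].rstrip("]} \n")
    return {name: value == "true" for name, value in PAIR.findall(body)}

line = ("I1125 21:30:44.933073 4910 feature_gate.go:386] feature gates: "
        "{map[CloudDualStackNodeIPs:true KMSv1:true NodeSwap:false]}")
gates = parse_gates(line)
assert gates["CloudDualStackNodeIPs"] and not gates["NodeSwap"]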
Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947691 4910 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947701 4910 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947709 4910 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947717 4910 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947726 4910 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947734 4910 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947742 4910 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947750 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947760 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947768 4910 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947776 4910 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947785 4910 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947792 4910 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947800 4910 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947808 4910 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947815 4910 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947823 4910 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947831 4910 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947838 4910 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947846 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947854 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947863 4910 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947871 4910 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947881 4910 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947898 4910 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947906 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947915 4910 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947923 4910 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947931 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947940 4910 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947948 4910 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947956 4910 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947964 4910 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947974 4910 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947984 4910 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.947992 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948000 4910 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948009 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948018 4910 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948025 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948036 4910 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948046 4910 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948055 4910 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948065 4910 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948074 4910 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948083 4910 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948091 4910 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948098 4910 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948106 4910 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948114 4910 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948122 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948130 4910 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948138 4910 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948146 4910 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948154 4910 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948163 4910 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948171 4910 feature_gate.go:330] unrecognized feature gate: Example Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948178 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948186 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948194 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948203 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.948219 4910 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948524 4910 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948541 4910 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes 
Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948551 4910 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948561 4910 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948571 4910 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948580 4910 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948590 4910 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948599 4910 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948608 4910 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948618 4910 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948627 4910 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948635 4910 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948643 4910 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948651 4910 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948659 4910 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948666 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948674 4910 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948682 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948690 4910 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948698 4910 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948708 4910 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948719 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948726 4910 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948734 4910 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948743 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948752 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948761 4910 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948769 4910 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948777 4910 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948785 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948793 4910 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948800 4910 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948808 4910 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948815 4910 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948825 4910 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948833 4910 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948840 4910 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948848 4910 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948858 4910 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948868 4910 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948877 4910 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948885 4910 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948898 4910 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948906 4910 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948914 4910 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948922 4910 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948930 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948939 4910 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948947 4910 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948955 4910 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948964 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948972 4910 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948980 4910 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948987 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.948995 4910 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949003 4910 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949013 4910 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949021 4910 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949029 4910 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949037 4910 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949045 4910 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949053 4910 feature_gate.go:330] unrecognized feature gate: Example Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949061 4910 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949071 4910 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. 
Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949081 4910 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949089 4910 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949099 4910 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949108 4910 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949118 4910 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949128 4910 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 25 21:30:44 crc kubenswrapper[4910]: W1125 21:30:44.949139 4910 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.949152 4910 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.950228 4910 server.go:940] "Client rotation is on, will bootstrap in background" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.957002 4910 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary" Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.957128 4910 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem". 
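[Note] "Client rotation is on" means the certificate manager schedules rotation at a randomized point late in the client certificate's validity. The deadline logged just below (2025-11-12) already lies in the past relative to this boot (Nov 25), so the kubelet immediately attempts a CSR and fails with connection refused because api-int is not yet up. A back-of-the-envelope sketch of the jittered-deadline idea; the 70-90% window and the assumed one-year issue time are illustrative, not the exact upstream constants:

import random
from datetime import datetime, timedelta

def rotation_deadline(not_before: datetime, not_after: datetime) -> datetime:
    """Pick a rotation point at a random fraction late in the cert's lifetime.

    The 0.7-0.9 window is an illustrative assumption; the real certificate
    manager applies its own jitter policy.
    """
    lifetime = (not_after - not_before).total_seconds()
    return not_before + timedelta(seconds=lifetime * random.uniform(0.7, 0.9))

issued = datetime(2025, 2, 24, 5, 52, 8)   # assumed issue time, 1y before expiry
expires = datetime(2026, 2, 24, 5, 52, 8)  # expiry from the log line below
print(rotation_deadline(issued, expires))  # most draws land in late 2025,
                                           # near the deadline seen in the log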
Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.958876 4910 server.go:997] "Starting client certificate rotation"
Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.958929 4910 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.960115 4910 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-12 09:41:47.67991751 +0000 UTC
Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.960222 4910 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.985742 4910 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 21:30:44 crc kubenswrapper[4910]: I1125 21:30:44.992061 4910 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 25 21:30:44 crc kubenswrapper[4910]: E1125 21:30:44.992891 4910 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.142:6443: connect: connection refused" logger="UnhandledError"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.017574 4910 log.go:25] "Validated CRI v1 runtime API"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.055847 4910 log.go:25] "Validated CRI v1 image API"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.058639 4910 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.065027 4910 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-25-21-26-25-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.065075 4910 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}]
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.096176 4910 manager.go:217] Machine: {Timestamp:2025-11-25 21:30:45.092026061 +0000 UTC m=+0.554502473 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:b9e4d4d5-ee6d-4a38-9671-95a95b18ac40 BootID:2a34d4b8-8687-4ac8-90d3-67253e425782 Filesystems:[{Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:7e:b2:3f Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:7e:b2:3f Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:88:a0:22 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:1f:09:94 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:dc:f0:42 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:5b:3e:96 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:e2:1c:e4:5c:9d:9c Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:de:56:c8:40:e2:b8 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.096754 4910 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.096962 4910 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.099714 4910 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.100120 4910 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.100181 4910 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.100669 4910 topology_manager.go:138] "Creating topology manager with none policy"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.100689 4910 container_manager_linux.go:303] "Creating device plugin manager"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.101364 4910 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.101419 4910 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.101705 4910 state_mem.go:36] "Initialized new in-memory state store"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.101848 4910 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.106647 4910 kubelet.go:418] "Attempting to sync node with API server"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.106685 4910 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.106727 4910 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.106751 4910 kubelet.go:324] "Adding apiserver pod source"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.106770 4910 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.111941 4910 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 25 21:30:45 crc kubenswrapper[4910]: W1125 21:30:45.113032 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused
Nov 25 21:30:45 crc kubenswrapper[4910]: W1125 21:30:45.113012 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused
Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.113218 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.142:6443: connect: connection refused" logger="UnhandledError"
Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.113224 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.142:6443: connect: connection refused" logger="UnhandledError"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.113384 4910 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.115683 4910 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117477 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117523 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117539 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117555 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117579 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117595 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117610 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117634 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117651 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117665 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117687 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.117703 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.119557 4910 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.120486 4910 server.go:1280] "Started kubelet"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.120883 4910 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.121528 4910 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.121527 4910 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 25 21:30:45 crc systemd[1]: Started Kubernetes Kubelet.
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.127509 4910 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.131798 4910 server.go:460] "Adding debug handlers to kubelet server"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.136000 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.136146 4910 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.136381 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 09:17:54.967431422 +0000 UTC
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.136858 4910 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.136956 4910 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.136981 4910 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.137093 4910 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.137603 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" interval="200ms"
Nov 25 21:30:45 crc kubenswrapper[4910]: W1125 21:30:45.137807 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.137924 4910 factory.go:55] Registering systemd factory
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.137943 4910 factory.go:221] Registration of the systemd container factory successfully
Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.137913 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.142:6443: connect: connection refused" logger="UnhandledError"
Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.136976 4910 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.142:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b5d488673b5d9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 21:30:45.120439769 +0000 UTC m=+0.582916151,LastTimestamp:2025-11-25 21:30:45.120439769 +0000 UTC m=+0.582916151,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.138392 4910 factory.go:153] Registering CRI-O factory
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.138457 4910 factory.go:221] Registration of the crio container factory successfully
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.138692 4910 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.138762 4910 factory.go:103] Registering Raw factory
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.138813 4910 manager.go:1196] Started watching for new ooms in manager
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.140120 4910 manager.go:319] Starting recovery of all containers
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.158572 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159042 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159077 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159124 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159151 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159172 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159194 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159216 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159269 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159292 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159314 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159345 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159371 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159427 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159488 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159518 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159613 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159635 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159655 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159676 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159731 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159752 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.159780 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.160834 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.160926 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.160961 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161002 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161046 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161075 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161099 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161120 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161141 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161153 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161173 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161191 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161204 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161225 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161256 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161296 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161312 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161375 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161396 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161408 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161423 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161444 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161457 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161475 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161511 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161546 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161561 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161601 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161623 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161644 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161662 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161681 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161697 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161716 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161728 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161744 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161759 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161806 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161825 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161842 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161866 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161881 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161898 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161909 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161924 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161942 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161956 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161973 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.161991 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162003 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162020 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162033 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162047 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162063 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162076 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162096 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162107 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162120 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162135 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162151 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162171 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162185 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162196 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162213 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162256 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162283 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162296 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162309 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162324 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162336 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162348 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162366 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162377 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162394 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162405 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162416 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162434 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162446 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162463 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162476 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162488 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162509 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162529 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162546 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162561 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162578 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162598 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162612 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162630 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162646 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162658 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162670 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162686 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162698 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162715 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162728 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162740 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162756 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162767 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162785 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162797 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162809 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162824 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162838 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162855 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162868 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162881 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162897 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162910 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162925 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162936 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162958 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162977 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.162993 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163008 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163025 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163039 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext=""
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163060 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029"
volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163079 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163102 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163123 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163139 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163160 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163175 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163190 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163208 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163223 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163264 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.163282 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165020 4910 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165052 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165071 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165084 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165097 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165112 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165123 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165137 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165149 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165162 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 
21:30:45.165178 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165189 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165203 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165213 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165225 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165262 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165274 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165289 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165299 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165310 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165329 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165344 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165362 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165376 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165387 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165411 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165428 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165444 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165459 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165477 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165496 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165508 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165534 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165551 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165562 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165575 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165586 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165596 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165610 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165620 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165635 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165645 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165655 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165668 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165788 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165807 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165825 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165836 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165847 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165861 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165872 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165893 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165904 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165923 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165945 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" 
volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165957 4910 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165966 4910 reconstruct.go:97] "Volume reconstruction finished" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.165973 4910 reconciler.go:26] "Reconciler: start to sync state" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.176349 4910 manager.go:324] Recovery completed Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.189805 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.192295 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.192500 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.192644 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.194523 4910 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.194559 4910 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.194587 4910 state_mem.go:36] "Initialized new in-memory state store" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.201192 4910 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.202613 4910 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.202660 4910 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.202692 4910 kubelet.go:2335] "Starting kubelet main sync loop" Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.202804 4910 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 25 21:30:45 crc kubenswrapper[4910]: W1125 21:30:45.204046 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.204117 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.142:6443: connect: connection refused" logger="UnhandledError" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.212560 4910 policy_none.go:49] "None policy: Start" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.214333 4910 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.214399 4910 state_mem.go:35] "Initializing new in-memory state store" Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.237981 4910 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.273639 4910 manager.go:334] "Starting Device Plugin manager" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.274192 4910 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.274218 4910 server.go:79] "Starting device plugin registration server" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.274819 4910 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.274841 4910 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.275197 4910 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.275322 4910 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.275336 4910 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.283661 4910 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.303718 4910 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 21:30:45 crc kubenswrapper[4910]: 
I1125 21:30:45.303801 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.304701 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.304753 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.304767 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.304987 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.305322 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.305355 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.306099 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.306128 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.306131 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.306177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.306150 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.306214 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.306438 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.306561 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.306602 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.307497 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.307514 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.307523 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.307538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.307554 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.307588 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.307701 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.307844 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.307878 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.308652 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.308675 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.308686 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.308812 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.308848 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.308888 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.308898 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.309016 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.309051 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.309628 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.309668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.309680 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.309793 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.309818 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.309827 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.309837 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.309865 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.310593 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.310626 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.310653 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.338789 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" interval="400ms" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.367573 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.367634 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.367654 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.367718 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.367741 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.371213 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.371434 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.371596 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.372733 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.372792 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.372875 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.372934 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" 
Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.372961 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.372982 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.373008 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.375983 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.377730 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.377768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.377780 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.377807 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.378200 4910 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.142:6443: connect: connection refused" node="crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.473696 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.473767 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.473798 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.473819 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.473839 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.473860 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.473882 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.473904 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.473941 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.473967 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.473989 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474013 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474039 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474066 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474087 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474483 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474537 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474562 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474632 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474651 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474669 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474717 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474724 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474728 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474751 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474752 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474776 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474790 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474793 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.474827 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.578866 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.580616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.580676 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.580690 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.580725 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.581387 4910 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.142:6443: connect: connection refused" 
node="crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.643481 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.672685 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: W1125 21:30:45.695281 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-681ac336a27269fc1f35ca883f1574ffd86272cac936a2376ed389c438b96090 WatchSource:0}: Error finding container 681ac336a27269fc1f35ca883f1574ffd86272cac936a2376ed389c438b96090: Status 404 returned error can't find the container with id 681ac336a27269fc1f35ca883f1574ffd86272cac936a2376ed389c438b96090 Nov 25 21:30:45 crc kubenswrapper[4910]: W1125 21:30:45.704830 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-90401c860cbba409fc32f4740a7e327b51c1e2788917076fafc508237903db3c WatchSource:0}: Error finding container 90401c860cbba409fc32f4740a7e327b51c1e2788917076fafc508237903db3c: Status 404 returned error can't find the container with id 90401c860cbba409fc32f4740a7e327b51c1e2788917076fafc508237903db3c Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.706964 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.723845 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.733005 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 21:30:45 crc kubenswrapper[4910]: W1125 21:30:45.733040 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-c28c97b803b52673421c2aa0df51a80ecae7f49f151ea8bac0591cb033bbc385 WatchSource:0}: Error finding container c28c97b803b52673421c2aa0df51a80ecae7f49f151ea8bac0591cb033bbc385: Status 404 returned error can't find the container with id c28c97b803b52673421c2aa0df51a80ecae7f49f151ea8bac0591cb033bbc385 Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.739828 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" interval="800ms" Nov 25 21:30:45 crc kubenswrapper[4910]: W1125 21:30:45.753581 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-24119b64ed7e0a7f8906be2f452634cf4a17ba878906ebd08b38b97fb23dfcda WatchSource:0}: Error finding container 24119b64ed7e0a7f8906be2f452634cf4a17ba878906ebd08b38b97fb23dfcda: Status 404 returned error can't find the container with id 24119b64ed7e0a7f8906be2f452634cf4a17ba878906ebd08b38b97fb23dfcda Nov 25 21:30:45 crc kubenswrapper[4910]: W1125 21:30:45.763605 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-bf7ec752e21c3196f58ba8dd90f11b1641946818b60e261655fd77176ab38e41 WatchSource:0}: Error finding container bf7ec752e21c3196f58ba8dd90f11b1641946818b60e261655fd77176ab38e41: Status 404 returned error can't find the container with id bf7ec752e21c3196f58ba8dd90f11b1641946818b60e261655fd77176ab38e41 Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.981959 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.983279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.983320 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.983336 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:45 crc kubenswrapper[4910]: I1125 21:30:45.983360 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 21:30:45 crc kubenswrapper[4910]: E1125 21:30:45.983693 4910 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.142:6443: connect: connection refused" node="crc" Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.122572 4910 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.137596 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate 
expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 18:38:39.068765108 +0000 UTC Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.137773 4910 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1197h7m52.930997634s for next certificate rotation Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.207231 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"bf7ec752e21c3196f58ba8dd90f11b1641946818b60e261655fd77176ab38e41"} Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.208731 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"24119b64ed7e0a7f8906be2f452634cf4a17ba878906ebd08b38b97fb23dfcda"} Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.212483 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c28c97b803b52673421c2aa0df51a80ecae7f49f151ea8bac0591cb033bbc385"} Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.213587 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"90401c860cbba409fc32f4740a7e327b51c1e2788917076fafc508237903db3c"} Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.215043 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"681ac336a27269fc1f35ca883f1574ffd86272cac936a2376ed389c438b96090"} Nov 25 21:30:46 crc kubenswrapper[4910]: W1125 21:30:46.483348 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused Nov 25 21:30:46 crc kubenswrapper[4910]: E1125 21:30:46.483504 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.142:6443: connect: connection refused" logger="UnhandledError" Nov 25 21:30:46 crc kubenswrapper[4910]: W1125 21:30:46.504787 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused Nov 25 21:30:46 crc kubenswrapper[4910]: E1125 21:30:46.504875 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.142:6443: connect: connection refused" logger="UnhandledError" Nov 25 21:30:46 crc kubenswrapper[4910]: E1125 21:30:46.540952 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" interval="1.6s" Nov 25 21:30:46 crc kubenswrapper[4910]: W1125 21:30:46.583195 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused Nov 25 21:30:46 crc kubenswrapper[4910]: E1125 21:30:46.583300 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.142:6443: connect: connection refused" logger="UnhandledError" Nov 25 21:30:46 crc kubenswrapper[4910]: W1125 21:30:46.590323 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused Nov 25 21:30:46 crc kubenswrapper[4910]: E1125 21:30:46.590377 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.142:6443: connect: connection refused" logger="UnhandledError" Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.784197 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.785475 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.785514 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.785530 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:46 crc kubenswrapper[4910]: I1125 21:30:46.785560 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 21:30:46 crc kubenswrapper[4910]: E1125 21:30:46.786050 4910 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.142:6443: connect: connection refused" node="crc" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.086122 4910 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Nov 25 21:30:47 crc kubenswrapper[4910]: E1125 21:30:47.086940 4910 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.142:6443: connect: connection refused" logger="UnhandledError" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.121782 4910 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.220194 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c" exitCode=0 Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.220283 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c"} Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.220371 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.221388 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.221422 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.221434 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.221875 4910 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="70707fa96494e2e4c62c36afdbd5487211aee87f3120e14ae5e68ecbd523b7e2" exitCode=0 Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.221909 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"70707fa96494e2e4c62c36afdbd5487211aee87f3120e14ae5e68ecbd523b7e2"} Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.222078 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.222885 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.222937 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.222945 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.222985 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.224265 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.224284 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.224292 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.225334 4910 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="42dcae1e422c140f201d055b514920c37ef165bc83ace379b17311278469d953" exitCode=0 
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.225395 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"42dcae1e422c140f201d055b514920c37ef165bc83ace379b17311278469d953"}
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.225476 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.226326 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.226348 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.226357 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.227617 4910 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e" exitCode=0
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.227719 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.227806 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e"}
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.228628 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.228659 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.228671 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.231608 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09"}
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.231645 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc"}
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.231657 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307"}
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.231666 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76"}
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.231722 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.232305 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.232334 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.232343 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:47 crc kubenswrapper[4910]: I1125 21:30:47.967673 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.122567 4910 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused
Nov 25 21:30:48 crc kubenswrapper[4910]: E1125 21:30:48.142626 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" interval="3.2s"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.235923 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83"}
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.235971 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e"}
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.235983 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609"}
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.235994 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879"}
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.237782 4910 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="6c25569fcfb042699b5fd5f47c6b6aca6749363fd1ebe5e11255078114f91c95" exitCode=0
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.237820 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"6c25569fcfb042699b5fd5f47c6b6aca6749363fd1ebe5e11255078114f91c95"}
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.237936 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.238643 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.238661 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.238669 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.240666 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"1856abcdc2d9828f760e18deb42cc996ab372e3f7ca2f560f4df9b02ac1dbb71"}
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.240722 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.246583 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.246616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.246628 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.252837 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.253227 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.253553 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a"}
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.253579 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35"}
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.253589 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00"}
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.254448 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.254498 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.254513 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.254517 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.254531 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.254521 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:48 crc kubenswrapper[4910]: W1125 21:30:48.353969 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.142:6443: connect: connection refused
Nov 25 21:30:48 crc kubenswrapper[4910]: E1125 21:30:48.354056 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.142:6443: connect: connection refused" logger="UnhandledError"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.386929 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.388019 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.388061 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.388074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:48 crc kubenswrapper[4910]: I1125 21:30:48.388099 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 21:30:48 crc kubenswrapper[4910]: E1125 21:30:48.388611 4910 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.142:6443: connect: connection refused" node="crc"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.161974 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.260151 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b61d60d9b25627aa72090d7761d33520a1bcc72b951f133b7e1f2b2935c84f82"}
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.260327 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.261312 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.261339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.261347 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.264073 4910 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="2f8565f957118a02f401452d27ca87a47a3331e4639543f7543fad3c758e494e" exitCode=0
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.264112 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"2f8565f957118a02f401452d27ca87a47a3331e4639543f7543fad3c758e494e"}
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.264239 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.264276 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.264421 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.264781 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.265990 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.266011 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.266019 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.266136 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.266160 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.266172 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.266480 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.266541 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.266560 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.266889 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.266915 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.266928 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:49 crc kubenswrapper[4910]: I1125 21:30:49.630893 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.350706 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.350707 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"0e3ccd7b5c07fed4c28685a91f5b0277561f0b3c3d880f0d07ed5453966b13a2"}
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.350742 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.350757 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"05deb6c0930594d180f3998d6e35df0fe396f3d356584e3dc3a277f259b50905"}
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.350780 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"a9fffd47d3fba52d03a64476b29cfd51f5abe31590ca8a622e6ca938ccd1c41c"}
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.350711 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.350824 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.351641 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.351668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.351675 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.352441 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.352469 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.352479 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.352761 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.352784 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.352792 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:50 crc kubenswrapper[4910]: I1125 21:30:50.812224 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.214768 4910 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.361674 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"28ee93f117857809c5cadef5a2123eb7c02d26ef97bdc72f9b4ac9aa1ab79038"}
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.362356 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2a162aa9b5484f39dd39224b00706e1c60d11b8a2219df8c9b7d06453bc7a2fb"}
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.361839 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.361774 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.364463 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.364463 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.364520 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.364567 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.364585 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.364613 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.589062 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.590547 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.590584 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.590595 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.590618 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 25 21:30:51 crc kubenswrapper[4910]: I1125 21:30:51.672127 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 21:30:52 crc kubenswrapper[4910]: I1125 21:30:52.364279 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:52 crc kubenswrapper[4910]: I1125 21:30:52.364392 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:52 crc kubenswrapper[4910]: I1125 21:30:52.365370 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:52 crc kubenswrapper[4910]: I1125 21:30:52.365634 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:52 crc kubenswrapper[4910]: I1125 21:30:52.365658 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:52 crc kubenswrapper[4910]: I1125 21:30:52.365826 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:52 crc kubenswrapper[4910]: I1125 21:30:52.365877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:52 crc kubenswrapper[4910]: I1125 21:30:52.365894 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:53 crc kubenswrapper[4910]: I1125 21:30:53.186780 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 21:30:53 crc kubenswrapper[4910]: I1125 21:30:53.366839 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:53 crc kubenswrapper[4910]: I1125 21:30:53.367777 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:53 crc kubenswrapper[4910]: I1125 21:30:53.367806 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:53 crc kubenswrapper[4910]: I1125 21:30:53.367814 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:53 crc kubenswrapper[4910]: I1125 21:30:53.629805 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 21:30:53 crc kubenswrapper[4910]: I1125 21:30:53.630104 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:53 crc kubenswrapper[4910]: I1125 21:30:53.633028 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:53 crc kubenswrapper[4910]: I1125 21:30:53.633091 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:53 crc kubenswrapper[4910]: I1125 21:30:53.633109 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:53 crc kubenswrapper[4910]: I1125 21:30:53.636063 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 21:30:54 crc kubenswrapper[4910]: I1125 21:30:54.370566 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:54 crc kubenswrapper[4910]: I1125 21:30:54.371593 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:54 crc kubenswrapper[4910]: I1125 21:30:54.371635 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:54 crc kubenswrapper[4910]: I1125 21:30:54.371650 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:54 crc kubenswrapper[4910]: I1125 21:30:54.639718 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 21:30:55 crc kubenswrapper[4910]: E1125 21:30:55.284021 4910 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 25 21:30:55 crc kubenswrapper[4910]: I1125 21:30:55.372793 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:55 crc kubenswrapper[4910]: I1125 21:30:55.374652 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:55 crc kubenswrapper[4910]: I1125 21:30:55.374756 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:55 crc kubenswrapper[4910]: I1125 21:30:55.374773 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:55 crc kubenswrapper[4910]: I1125 21:30:55.589643 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 25 21:30:55 crc kubenswrapper[4910]: I1125 21:30:55.589972 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:55 crc kubenswrapper[4910]: I1125 21:30:55.591373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:55 crc kubenswrapper[4910]: I1125 21:30:55.591410 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:55 crc kubenswrapper[4910]: I1125 21:30:55.591423 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:57 crc kubenswrapper[4910]: I1125 21:30:57.640229 4910 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body=
Nov 25 21:30:57 crc kubenswrapper[4910]: I1125 21:30:57.640375 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Nov 25 21:30:57 crc kubenswrapper[4910]: I1125 21:30:57.975116 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 25 21:30:57 crc kubenswrapper[4910]: I1125 21:30:57.975696 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:57 crc kubenswrapper[4910]: I1125 21:30:57.977555 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:57 crc kubenswrapper[4910]: I1125 21:30:57.977751 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:57 crc kubenswrapper[4910]: I1125 21:30:57.977933 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:58 crc kubenswrapper[4910]: W1125 21:30:58.584847 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 25 21:30:58 crc kubenswrapper[4910]: I1125 21:30:58.584977 4910 trace.go:236] Trace[2128636531]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 21:30:48.583) (total time: 10001ms):
Nov 25 21:30:58 crc kubenswrapper[4910]: Trace[2128636531]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10001ms (21:30:58.584)
Nov 25 21:30:58 crc kubenswrapper[4910]: Trace[2128636531]: [10.001283957s] [10.001283957s] END
Nov 25 21:30:58 crc kubenswrapper[4910]: E1125 21:30:58.585008 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 25 21:30:58 crc kubenswrapper[4910]: W1125 21:30:58.621103 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 25 21:30:58 crc kubenswrapper[4910]: I1125 21:30:58.621199 4910 trace.go:236] Trace[1447872292]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 21:30:48.620) (total time: 10000ms):
Nov 25 21:30:58 crc kubenswrapper[4910]: Trace[1447872292]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": net/http: TLS handshake timeout 10000ms (21:30:58.621)
Nov 25 21:30:58 crc kubenswrapper[4910]: Trace[1447872292]: [10.000982452s] [10.000982452s] END
Nov 25 21:30:58 crc kubenswrapper[4910]: E1125 21:30:58.621225 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 25 21:30:59 crc kubenswrapper[4910]: W1125 21:30:59.049218 4910 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.049336 4910 trace.go:236] Trace[1550471166]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 21:30:49.048) (total time: 10000ms):
Nov 25 21:30:59 crc kubenswrapper[4910]: Trace[1550471166]: ---"Objects listed" error:Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": net/http: TLS handshake timeout 10000ms (21:30:59.049)
Nov 25 21:30:59 crc kubenswrapper[4910]: Trace[1550471166]: [10.000770148s] [10.000770148s] END
Nov 25 21:30:59 crc kubenswrapper[4910]: E1125 21:30:59.049364 4910 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": net/http: TLS handshake timeout" logger="UnhandledError"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.123502 4910 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.385599 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.388310 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b61d60d9b25627aa72090d7761d33520a1bcc72b951f133b7e1f2b2935c84f82" exitCode=255
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.388353 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"b61d60d9b25627aa72090d7761d33520a1bcc72b951f133b7e1f2b2935c84f82"}
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.388565 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.389663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.389695 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.389712 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.390426 4910 scope.go:117] "RemoveContainer" containerID="b61d60d9b25627aa72090d7761d33520a1bcc72b951f133b7e1f2b2935c84f82"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.656230 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.656405 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.657374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.657470 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.657618 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:30:59 crc kubenswrapper[4910]: I1125 21:30:59.696595 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.393457 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.395296 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c"}
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.395525 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.395357 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.396560 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.396692 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.396763 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.396580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.396895 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.396916 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.409879 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.520229 4910 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.520315 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.524810 4910 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403}
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.524865 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403"
Nov 25 21:31:00 crc kubenswrapper[4910]: I1125 21:31:00.812401 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 21:31:01 crc kubenswrapper[4910]: I1125 21:31:01.397996 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:31:01 crc kubenswrapper[4910]: I1125 21:31:01.399068 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:01 crc kubenswrapper[4910]: I1125 21:31:01.399104 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:01 crc kubenswrapper[4910]: I1125 21:31:01.399118 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:01 crc kubenswrapper[4910]: I1125 21:31:01.399948 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:31:01 crc kubenswrapper[4910]: I1125 21:31:01.402114 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:01 crc kubenswrapper[4910]: I1125 21:31:01.402152 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:01 crc kubenswrapper[4910]: I1125 21:31:01.402164 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:02 crc kubenswrapper[4910]: I1125 21:31:02.923219 4910 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 25 21:31:03 crc kubenswrapper[4910]: I1125 21:31:03.192059 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 21:31:03 crc kubenswrapper[4910]: I1125 21:31:03.192236 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:31:03 crc kubenswrapper[4910]: I1125 21:31:03.193204 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:03 crc kubenswrapper[4910]: I1125 21:31:03.193232 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:03 crc kubenswrapper[4910]: I1125 21:31:03.193265 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:03 crc kubenswrapper[4910]: I1125 21:31:03.198338 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 25 21:31:03 crc kubenswrapper[4910]: I1125 21:31:03.402033 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 25 21:31:03 crc kubenswrapper[4910]: I1125 21:31:03.403007 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:03 crc kubenswrapper[4910]: I1125 21:31:03.403049 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:03 crc kubenswrapper[4910]: I1125 21:31:03.403062 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.219708 4910 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.353615 4910 apiserver.go:52] "Watching apiserver"
Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.359172 4910 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.359407 4910 kubelet.go:2421] "SyncLoop ADD" source="api"
pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"] Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.359805 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.359950 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:04 crc kubenswrapper[4910]: E1125 21:31:04.360003 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.359818 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:04 crc kubenswrapper[4910]: E1125 21:31:04.360137 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.360223 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:04 crc kubenswrapper[4910]: E1125 21:31:04.360278 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.360304 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.360679 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.361636 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.363001 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.363753 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.363617 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.364232 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.365050 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.365082 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.366419 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.366570 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.392013 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.410556 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.425134 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.438874 4910 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.440189 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.453177 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.462083 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.478498 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:04 crc kubenswrapper[4910]: I1125 21:31:04.985389 4910 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.220910 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.230685 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.247299 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.269077 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.284628 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.306952 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.516464 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.519094 4910 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.520277 4910 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.520293 4910 trace.go:236] Trace[1572872882]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (25-Nov-2025 21:30:53.019) (total time: 12500ms): Nov 25 21:31:05 crc kubenswrapper[4910]: Trace[1572872882]: ---"Objects listed" error: 12500ms (21:31:05.520) Nov 25 21:31:05 crc kubenswrapper[4910]: Trace[1572872882]: [12.500655084s] [12.500655084s] END Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.520339 4910 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.532548 4910 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.551846 4910 csr.go:261] certificate signing request csr-t4k6d is approved, waiting to be issued Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.559065 4910 csr.go:257] certificate signing request csr-t4k6d is issued Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.567103 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.573956 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.580358 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.592761 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.605474 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.619572 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.619624 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.619647 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.619667 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.619686 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.619704 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.619719 4910 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620057 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620047 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620087 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620068 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620113 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.619735 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620195 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620207 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). 
InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620229 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620294 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620322 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620351 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620376 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620400 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620425 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620451 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620452 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620474 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620501 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620529 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620555 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620579 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620607 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620631 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620658 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620664 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620684 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620706 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620730 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620790 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620819 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620834 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620841 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620865 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620913 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620938 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620962 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.620985 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621007 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621032 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621057 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621108 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod 
\"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621129 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621152 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621162 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621133 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621227 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621277 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621303 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621354 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621360 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621385 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621387 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621408 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621419 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621411 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621476 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621502 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621530 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621542 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621557 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621581 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621596 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621612 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621615 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621646 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621689 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621692 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621738 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621765 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621778 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621786 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621811 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621833 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621855 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621876 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621883 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621900 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621906 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621927 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621944 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621949 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.621994 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622028 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622050 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622071 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622088 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622101 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622105 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622130 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622149 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622171 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622195 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622219 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622263 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622290 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622314 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622365 4910 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622387 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622409 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622414 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622432 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622455 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622499 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622520 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622541 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622561 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" 
(UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622568 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622580 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622645 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622598 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622677 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622613 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622704 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622732 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622758 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622782 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622792 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622806 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622817 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622832 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622859 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622883 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622907 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622936 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622963 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.622988 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623012 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623014 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623037 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623064 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623087 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623100 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623110 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623158 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623166 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623198 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623200 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623234 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623275 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623293 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623312 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623352 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623363 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623369 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623393 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623410 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623438 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623461 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623496 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623516 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623534 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623554 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623573 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623574 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623592 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623602 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623611 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623660 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623704 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623726 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623745 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623766 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623785 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623807 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: 
\"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623826 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623845 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623867 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623885 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623902 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623922 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623941 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623958 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623975 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623991 4910 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624060 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624088 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624109 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624125 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624142 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624158 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624174 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624191 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624212 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624232 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624268 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624289 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624309 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624325 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624342 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624358 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624375 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624391 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624408 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624427 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624444 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624461 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624478 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624495 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624511 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624527 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624554 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624570 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624587 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624604 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624621 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624639 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624666 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624683 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624699 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624714 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624732 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624748 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623749 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624765 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623793 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623815 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623841 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623913 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.623977 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624205 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624473 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624820 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624847 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624522 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624868 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624880 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624887 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624929 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.624974 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625012 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625049 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625080 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625116 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625150 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625181 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625222 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625275 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625310 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625339 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625366 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625397 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625421 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625445 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625523 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625566 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625603 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625637 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625663 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625690 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625713 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625738 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625763 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625795 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625823 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625854 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625885 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625915 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626023 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626044 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626061 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626074 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626086 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626100 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626113 4910 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626126 4910 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626139 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626152 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626168 4910 reconciler_common.go:293] "Volume 
detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626180 4910 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626195 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626209 4910 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626223 4910 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626257 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626274 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626289 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626303 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626316 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626344 4910 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626360 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626375 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626390 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626404 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626417 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626433 4910 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626450 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626466 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626480 4910 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626494 4910 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626509 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626526 4910 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626544 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626559 4910 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626572 4910 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626586 4910 reconciler_common.go:293] "Volume detached for volume 
\"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626602 4910 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626615 4910 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626628 4910 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626643 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626660 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626672 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626682 4910 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626696 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626705 4910 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626717 4910 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626732 4910 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626746 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626759 4910 
reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626773 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626790 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.632530 4910 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.634970 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.642647 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625080 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625219 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625255 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625237 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.645739 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.650771 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.651194 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.651324 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.651651 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.652073 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.652125 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.652267 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.652418 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.652544 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.652434 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625457 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625646 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625656 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625914 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625921 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626138 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626203 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626379 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626550 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626668 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626718 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.626877 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.625317 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626984 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.627377 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.627637 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.628777 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.629192 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.626964 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.630359 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.632782 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.632921 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.640397 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.642088 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.642629 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.653101 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.653134 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). 
InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.653174 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.653598 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.653702 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.653911 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.654077 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.654958 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.655504 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.655862 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). 
InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.656010 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.656179 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.656601 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.642656 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.642976 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.643055 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.643069 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.643086 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.663433 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.659661 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.660053 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.652973 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:06.152925997 +0000 UTC m=+21.615402319 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.664698 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:06.164660096 +0000 UTC m=+21.627136438 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.665027 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.666226 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.667596 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.667808 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.668085 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.668564 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.668757 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.668991 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.657633 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.669342 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.669488 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.669550 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.669628 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.669812 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.669890 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.669911 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.670141 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.670388 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.670921 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.629667 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.657405 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.657419 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.657493 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.657645 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.657664 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). 
InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.657732 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.658026 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.658110 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.658147 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.658547 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.658684 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.658840 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.659128 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). 
InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.659213 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.659450 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.659571 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.660024 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.660153 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.660221 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.660445 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.671685 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.672351 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.672508 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.661524 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.661793 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.661922 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.661923 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.662256 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.662406 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.662658 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.672673 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.672853 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.672932 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.672953 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.673199 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.673600 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.674088 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.675486 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.675500 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.675682 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.676615 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:31:06.176570058 +0000 UTC m=+21.639046390 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.676674 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.677053 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.677081 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.677098 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.677184 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:06.177159299 +0000 UTC m=+21.639635621 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.678981 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.682039 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.685968 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.686015 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.686034 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:05 crc kubenswrapper[4910]: E1125 21:31:05.686133 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr 
podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:06.186106813 +0000 UTC m=+21.648583135 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.686479 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.686522 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.687074 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.687343 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.688785 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.688886 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.688956 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.689220 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.689393 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.689732 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.689914 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.690120 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.690553 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.694476 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.694645 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.694692 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.697597 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.700932 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.701527 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.702220 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.714658 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.716301 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.719859 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.722437 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.723234 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.723466 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727528 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727561 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727640 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: 
\"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727645 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727656 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727707 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727718 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727779 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727727 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727826 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727837 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727847 4910 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727857 4910 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727868 4910 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727877 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" 
(UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727896 4910 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727907 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727918 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727928 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727937 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727945 4910 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727955 4910 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727964 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727974 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727982 4910 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.727991 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728000 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728009 4910 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" 
(UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728019 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728028 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728037 4910 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728046 4910 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728068 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728077 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728086 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728094 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728104 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728113 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728123 4910 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728133 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728141 4910 reconciler_common.go:293] "Volume 
detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728150 4910 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728159 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728168 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728178 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728187 4910 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728196 4910 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728204 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728215 4910 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728225 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728234 4910 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728262 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728273 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728281 4910 reconciler_common.go:293] 
"Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728293 4910 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728302 4910 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728311 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728323 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728331 4910 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728341 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728600 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728614 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728624 4910 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728635 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728664 4910 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728673 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 
crc kubenswrapper[4910]: I1125 21:31:05.728682 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728751 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728761 4910 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728771 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728782 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728791 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728799 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728808 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728817 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728826 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728834 4910 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728842 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.728851 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 
21:31:05.728859 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729017 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729026 4910 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729035 4910 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729043 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729052 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729060 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729070 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729081 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729090 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729070 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729099 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729167 4910 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729177 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729186 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729197 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729206 4910 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729215 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729223 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729232 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729272 4910 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729282 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729291 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729301 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: 
\"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729310 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729318 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729327 4910 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729335 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729343 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729352 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729360 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729370 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729378 4910 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729386 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729395 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729405 4910 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729414 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 
21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729423 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729431 4910 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729440 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729453 4910 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729461 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729471 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729479 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729488 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729497 4910 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729505 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729513 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729521 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729530 4910 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: 
I1125 21:31:05.729538 4910 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729546 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729560 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729568 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729576 4910 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729584 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729592 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729602 4910 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729611 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729619 4910 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729628 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729637 4910 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729645 4910 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729653 4910 reconciler_common.go:293] "Volume 
detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729661 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729670 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.729684 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.734190 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.743824 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.756695 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"v
olumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.766084 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.775260 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.788215 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.830411 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.874336 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.884201 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Nov 25 21:31:05 crc kubenswrapper[4910]: I1125 21:31:05.894779 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Nov 25 21:31:05 crc kubenswrapper[4910]: W1125 21:31:05.910672 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-dabc976e4aa1fd33b801df89bcef49ec3805210ff9ee1ff4748322fafc89ca85 WatchSource:0}: Error finding container dabc976e4aa1fd33b801df89bcef49ec3805210ff9ee1ff4748322fafc89ca85: Status 404 returned error can't find the container with id dabc976e4aa1fd33b801df89bcef49ec3805210ff9ee1ff4748322fafc89ca85
Nov 25 21:31:05 crc kubenswrapper[4910]: W1125 21:31:05.913221 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-3e5bbd5934b103e02158ac7b753d77c7f78abf2d7c5cc50a30cd3724273a1d13 WatchSource:0}: Error finding container 3e5bbd5934b103e02158ac7b753d77c7f78abf2d7c5cc50a30cd3724273a1d13: Status 404 returned error can't find the container with id 3e5bbd5934b103e02158ac7b753d77c7f78abf2d7c5cc50a30cd3724273a1d13
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.203783 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.203885 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.204119 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.203966 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.204295 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.204486 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.234063 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.234425 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:31:07.234389519 +0000 UTC m=+22.696865861 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.234985 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.235034 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.235061 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.235118 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235153 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235213 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:07.235201935 +0000 UTC m=+22.697678257 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235329 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235415 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235444 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235329 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235503 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235515 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235535 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:07.235507811 +0000 UTC m=+22.697984313 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235558 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:07.235550332 +0000 UTC m=+22.698026914 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235630 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.235693 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:07.235681904 +0000 UTC m=+22.698158466 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.410452 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54"}
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.410511 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"5e209ef51efb951175f8d4b5518340083a11adfa07d93a5f6089c07a2340ffb6"}
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.411719 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log"
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.412051 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.413988 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c" exitCode=255
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.414024 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c"}
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.414113 4910 scope.go:117] "RemoveContainer" containerID="b61d60d9b25627aa72090d7761d33520a1bcc72b951f133b7e1f2b2935c84f82"
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.415870 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db"}
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.415914 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada"}
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.415928 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"3e5bbd5934b103e02158ac7b753d77c7f78abf2d7c5cc50a30cd3724273a1d13"}
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.416732 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"dabc976e4aa1fd33b801df89bcef49ec3805210ff9ee1ff4748322fafc89ca85"}
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.422729 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCoun
t\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.435882 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.444346 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.451853 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.460468 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.467464 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.475441 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.493225 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"]
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.493259 4910 scope.go:117] "RemoveContainer" containerID="2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c"
Nov 25 21:31:06 crc kubenswrapper[4910]: E1125 21:31:06.493532 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792"
Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.495229 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.506130 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.524147 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.532878 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.545294 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.556101 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.560006 4910 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-11-25 21:26:05 +0000 UTC, rotation deadline is 2026-10-05 09:59:39.967502113 +0000 UTC Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.560068 4910 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7524h28m33.407436655s for next certificate rotation Nov 25 21:31:06 crc kubenswrapper[4910]: I1125 21:31:06.567350 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.209337 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.210494 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.212708 4910 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.213960 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.215762 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.216746 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.217815 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.219543 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.220679 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.222291 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.223060 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.224917 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.225900 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.226801 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.228469 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.229499 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.231351 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.232122 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.233164 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.235304 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.236099 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.237846 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.238571 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.240132 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.240738 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.241801 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.243652 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.244002 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.244103 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244181 
4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:31:09.244154834 +0000 UTC m=+24.706631176 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244230 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.244264 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.244296 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244323 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:09.244301587 +0000 UTC m=+24.706777959 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.244356 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244408 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244422 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.244420 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244471 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244509 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244523 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244480 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:09.244465112 +0000 UTC m=+24.706941524 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244600 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:09.244582425 +0000 UTC m=+24.707058747 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244433 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244635 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.244702 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:09.244688237 +0000 UTC m=+24.707164659 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.245924 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.246666 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.247942 4910 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.248084 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.250631 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.251628 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.252107 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" 
path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.253825 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.254515 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.255474 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.256101 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.257171 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.257735 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.258690 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.259363 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.260358 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.260812 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.261757 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.262302 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.263496 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.263985 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" 
path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.264875 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.265335 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.266393 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.266942 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.267521 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.420420 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.422811 4910 scope.go:117] "RemoveContainer" containerID="2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.422922 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-gqjcx"] Nov 25 21:31:07 crc kubenswrapper[4910]: E1125 21:31:07.422980 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.423226 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-g8f4t"] Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.423359 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.423447 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-lpz8j"] Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.423595 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-jngcr"] Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.423593 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.423721 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-lpz8j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.425654 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.427383 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-cvj2j"] Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.429013 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.430533 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.430678 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.430851 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.431158 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.431206 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.431274 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.431496 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.431649 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.431714 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.431849 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.431891 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.431893 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.431946 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.432153 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.432207 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.434008 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.434334 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 21:31:07 crc 
kubenswrapper[4910]: I1125 21:31:07.435948 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.435948 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.436192 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.438277 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.438562 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.443025 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.457024 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.470118 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.480963 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.491232 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.503117 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.515476 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.530117 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.543802 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546263 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bczvw\" (UniqueName: 
\"kubernetes.io/projected/1ab2bfbf-87b6-418b-b6b9-707dd9239acc-kube-api-access-bczvw\") pod \"node-resolver-lpz8j\" (UID: \"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\") " pod="openshift-dns/node-resolver-lpz8j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546356 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-netns\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546384 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-etc-openvswitch\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546412 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-ovn-kubernetes\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546430 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-cni-dir\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546446 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-os-release\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546463 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-slash\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546485 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-systemd-units\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546512 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-log-socket\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546583 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-bin\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546643 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-os-release\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546687 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-run-multus-certs\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546710 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546733 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546758 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/89c4a6ab-992c-467f-92fe-1111582e1b49-proxy-tls\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546788 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-system-cni-dir\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546810 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-cni-binary-copy\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546834 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-env-overrides\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546861 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-config\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546885 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zsrh2\" (UniqueName: \"kubernetes.io/projected/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-kube-api-access-zsrh2\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546950 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-cnibin\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.546990 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-socket-dir-parent\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547017 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-conf-dir\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547073 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8m6kf\" (UniqueName: \"kubernetes.io/projected/89c4a6ab-992c-467f-92fe-1111582e1b49-kube-api-access-8m6kf\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547089 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-cnibin\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547105 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-ovn\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547121 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-node-log\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547136 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4cf48d68-85c8-45e7-8533-550e120eca12-ovn-node-metrics-cert\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547177 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-var-lib-cni-multus\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547198 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ptdrh\" (UniqueName: \"kubernetes.io/projected/4cf48d68-85c8-45e7-8533-550e120eca12-kube-api-access-ptdrh\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547213 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-var-lib-kubelet\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547271 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-daemon-config\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547309 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cf2sq\" (UniqueName: \"kubernetes.io/projected/751fe267-dc17-4de7-81e9-a8caab9e9817-kube-api-access-cf2sq\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547332 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-kubelet\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547356 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/751fe267-dc17-4de7-81e9-a8caab9e9817-cni-binary-copy\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547378 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-run-k8s-cni-cncf-io\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547396 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-var-lib-openvswitch\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547427 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/1ab2bfbf-87b6-418b-b6b9-707dd9239acc-hosts-file\") pod \"node-resolver-lpz8j\" (UID: \"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\") " pod="openshift-dns/node-resolver-lpz8j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547459 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-system-cni-dir\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547487 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-openvswitch\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547517 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/89c4a6ab-992c-467f-92fe-1111582e1b49-rootfs\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547538 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-script-lib\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547555 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-netd\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547572 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc 
kubenswrapper[4910]: I1125 21:31:07.547599 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-var-lib-cni-bin\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547618 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/89c4a6ab-992c-467f-92fe-1111582e1b49-mcd-auth-proxy-config\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547656 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-hostroot\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547678 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-run-netns\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547733 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-systemd\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.547756 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-etc-kubernetes\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.556947 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.568328 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.581156 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.593179 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.604123 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.616486 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.634303 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648336 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-os-release\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648378 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-run-multus-certs\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648394 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-systemd-units\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648410 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-log-socket\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648427 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-bin\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648442 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-system-cni-dir\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648459 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-cni-binary-copy\") pod 
\"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648474 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648491 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648508 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/89c4a6ab-992c-467f-92fe-1111582e1b49-proxy-tls\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648525 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-config\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648539 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-env-overrides\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648563 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zsrh2\" (UniqueName: \"kubernetes.io/projected/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-kube-api-access-zsrh2\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648578 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-cnibin\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648594 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-socket-dir-parent\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648609 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-conf-dir\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648624 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8m6kf\" (UniqueName: \"kubernetes.io/projected/89c4a6ab-992c-467f-92fe-1111582e1b49-kube-api-access-8m6kf\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648639 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-cnibin\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648654 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-ovn\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648683 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-var-lib-cni-multus\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648698 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-node-log\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648715 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4cf48d68-85c8-45e7-8533-550e120eca12-ovn-node-metrics-cert\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648733 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-var-lib-kubelet\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648750 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-daemon-config\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648765 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cf2sq\" (UniqueName: \"kubernetes.io/projected/751fe267-dc17-4de7-81e9-a8caab9e9817-kube-api-access-cf2sq\") pod 
\"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648782 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-kubelet\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648802 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ptdrh\" (UniqueName: \"kubernetes.io/projected/4cf48d68-85c8-45e7-8533-550e120eca12-kube-api-access-ptdrh\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648818 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/1ab2bfbf-87b6-418b-b6b9-707dd9239acc-hosts-file\") pod \"node-resolver-lpz8j\" (UID: \"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\") " pod="openshift-dns/node-resolver-lpz8j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648836 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-system-cni-dir\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648851 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/751fe267-dc17-4de7-81e9-a8caab9e9817-cni-binary-copy\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648865 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-run-k8s-cni-cncf-io\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648879 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-var-lib-openvswitch\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648898 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-openvswitch\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648842 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648954 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/89c4a6ab-992c-467f-92fe-1111582e1b49-rootfs\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.648917 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/89c4a6ab-992c-467f-92fe-1111582e1b49-rootfs\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649084 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: 
\"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-script-lib\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649108 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-var-lib-cni-bin\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649129 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/89c4a6ab-992c-467f-92fe-1111582e1b49-mcd-auth-proxy-config\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649132 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-run-multus-certs\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649136 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-os-release\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649167 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-var-lib-cni-bin\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649144 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-netd\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649179 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-netd\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649191 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-systemd-units\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649201 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649223 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-hostroot\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649254 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-bin\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649258 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-run-netns\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649274 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-run-netns\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649282 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-systemd\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649307 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-etc-kubernetes\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649325 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bczvw\" (UniqueName: \"kubernetes.io/projected/1ab2bfbf-87b6-418b-b6b9-707dd9239acc-kube-api-access-bczvw\") pod \"node-resolver-lpz8j\" (UID: \"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\") " pod="openshift-dns/node-resolver-lpz8j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649339 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-netns\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649380 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-etc-openvswitch\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649397 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-ovn-kubernetes\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649413 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-cni-dir\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649428 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-os-release\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649443 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-slash\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649486 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-slash\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649509 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-system-cni-dir\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649909 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/89c4a6ab-992c-467f-92fe-1111582e1b49-mcd-auth-proxy-config\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649923 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-script-lib\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649955 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-hostroot\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649226 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-log-socket\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.649956 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650014 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-cni-binary-copy\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650041 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-socket-dir-parent\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650150 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-ovn\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650180 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-conf-dir\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650237 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-kubelet\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650345 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-cnibin\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650386 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/1ab2bfbf-87b6-418b-b6b9-707dd9239acc-hosts-file\") pod \"node-resolver-lpz8j\" (UID: \"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\") " pod="openshift-dns/node-resolver-lpz8j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650417 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: 
\"kubernetes.io/configmap/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-daemon-config\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650419 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-system-cni-dir\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650468 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-var-lib-cni-multus\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650491 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-node-log\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650609 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-run-k8s-cni-cncf-io\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650638 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-var-lib-openvswitch\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650665 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-openvswitch\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650685 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-systemd\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650704 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-etc-kubernetes\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650718 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-env-overrides\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc 
kubenswrapper[4910]: I1125 21:31:07.650751 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-cnibin\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650724 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-host-var-lib-kubelet\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650786 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-ovn-kubernetes\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650806 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-netns\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650826 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-etc-openvswitch\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650864 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-multus-cni-dir\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650884 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/751fe267-dc17-4de7-81e9-a8caab9e9817-cni-binary-copy\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650901 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/751fe267-dc17-4de7-81e9-a8caab9e9817-os-release\") pod \"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650957 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.650950 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-config\") pod 
\"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.651617 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.654253 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/89c4a6ab-992c-467f-92fe-1111582e1b49-proxy-tls\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.654567 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4cf48d68-85c8-45e7-8533-550e120eca12-ovn-node-metrics-cert\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.662726 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc3
5825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.672215 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bczvw\" (UniqueName: \"kubernetes.io/projected/1ab2bfbf-87b6-418b-b6b9-707dd9239acc-kube-api-access-bczvw\") pod \"node-resolver-lpz8j\" (UID: \"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\") " pod="openshift-dns/node-resolver-lpz8j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.672377 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cf2sq\" (UniqueName: \"kubernetes.io/projected/751fe267-dc17-4de7-81e9-a8caab9e9817-kube-api-access-cf2sq\") pod 
\"multus-gqjcx\" (UID: \"751fe267-dc17-4de7-81e9-a8caab9e9817\") " pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.672915 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zsrh2\" (UniqueName: \"kubernetes.io/projected/08d06fc8-cc2c-4b86-a391-f6cb96fad95c-kube-api-access-zsrh2\") pod \"multus-additional-cni-plugins-jngcr\" (UID: \"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\") " pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.674033 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8m6kf\" (UniqueName: \"kubernetes.io/projected/89c4a6ab-992c-467f-92fe-1111582e1b49-kube-api-access-8m6kf\") pod \"machine-config-daemon-g8f4t\" (UID: \"89c4a6ab-992c-467f-92fe-1111582e1b49\") " pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.674591 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ptdrh\" (UniqueName: \"kubernetes.io/projected/4cf48d68-85c8-45e7-8533-550e120eca12-kube-api-access-ptdrh\") pod \"ovnkube-node-cvj2j\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.674839 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.685090 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.699956 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:07Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.739562 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-gqjcx" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.746427 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:31:07 crc kubenswrapper[4910]: W1125 21:31:07.749002 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod751fe267_dc17_4de7_81e9_a8caab9e9817.slice/crio-bc873d701db04e31e13d5b7b135ab1eba1602f65fb3435c0e9550ee77a798cba WatchSource:0}: Error finding container bc873d701db04e31e13d5b7b135ab1eba1602f65fb3435c0e9550ee77a798cba: Status 404 returned error can't find the container with id bc873d701db04e31e13d5b7b135ab1eba1602f65fb3435c0e9550ee77a798cba Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.755472 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-lpz8j" Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.759105 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-jngcr" Nov 25 21:31:07 crc kubenswrapper[4910]: W1125 21:31:07.761499 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89c4a6ab_992c_467f_92fe_1111582e1b49.slice/crio-8674896b01fcbe2b13981c8fc9713140795df0df009994fbada9f5098b477e80 WatchSource:0}: Error finding container 8674896b01fcbe2b13981c8fc9713140795df0df009994fbada9f5098b477e80: Status 404 returned error can't find the container with id 8674896b01fcbe2b13981c8fc9713140795df0df009994fbada9f5098b477e80 Nov 25 21:31:07 crc kubenswrapper[4910]: I1125 21:31:07.764549 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:07 crc kubenswrapper[4910]: W1125 21:31:07.788764 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4cf48d68_85c8_45e7_8533_550e120eca12.slice/crio-48afbf381caf236ff73201c7191b44c67b199e9befc0eba5fb283b999dd0ff9b WatchSource:0}: Error finding container 48afbf381caf236ff73201c7191b44c67b199e9befc0eba5fb283b999dd0ff9b: Status 404 returned error can't find the container with id 48afbf381caf236ff73201c7191b44c67b199e9befc0eba5fb283b999dd0ff9b Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.203819 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.203847 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:08 crc kubenswrapper[4910]: E1125 21:31:08.204183 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:08 crc kubenswrapper[4910]: E1125 21:31:08.204279 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.203883 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:08 crc kubenswrapper[4910]: E1125 21:31:08.204343 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.436032 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6" exitCode=0 Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.436109 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6"} Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.436190 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"48afbf381caf236ff73201c7191b44c67b199e9befc0eba5fb283b999dd0ff9b"} Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.438207 4910 generic.go:334] "Generic (PLEG): container finished" podID="08d06fc8-cc2c-4b86-a391-f6cb96fad95c" containerID="4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3" exitCode=0 Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.438292 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" event={"ID":"08d06fc8-cc2c-4b86-a391-f6cb96fad95c","Type":"ContainerDied","Data":"4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3"} Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.438311 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" event={"ID":"08d06fc8-cc2c-4b86-a391-f6cb96fad95c","Type":"ContainerStarted","Data":"ee0437aeb6fd3848ef6b8c963784190037069b067c8d5585d2de29130e21c4a1"} Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.440475 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gqjcx" event={"ID":"751fe267-dc17-4de7-81e9-a8caab9e9817","Type":"ContainerStarted","Data":"7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff"} Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.440504 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gqjcx" event={"ID":"751fe267-dc17-4de7-81e9-a8caab9e9817","Type":"ContainerStarted","Data":"bc873d701db04e31e13d5b7b135ab1eba1602f65fb3435c0e9550ee77a798cba"} Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.442663 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-lpz8j" event={"ID":"1ab2bfbf-87b6-418b-b6b9-707dd9239acc","Type":"ContainerStarted","Data":"97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e"} Nov 25 21:31:08 crc 
kubenswrapper[4910]: I1125 21:31:08.442724 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-lpz8j" event={"ID":"1ab2bfbf-87b6-418b-b6b9-707dd9239acc","Type":"ContainerStarted","Data":"cbb699fdbc56f95a48fd6a815e0d0b939a7b0bc4bdac1b43cd9e59b07b0211c8"} Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.449597 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806"} Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.449649 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547"} Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.449659 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"8674896b01fcbe2b13981c8fc9713140795df0df009994fbada9f5098b477e80"} Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.458617 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.476428 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.499191 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.527622 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.550046 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.566667 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de25971
26bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.581542 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.597331 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.614059 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.633818 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.648716 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.658554 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.670373 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.684061 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.696271 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.711917 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.726633 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\
\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.746445 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z 
is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.763162 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.781694 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.798632 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.817586 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.868481 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc 
kubenswrapper[4910]: I1125 21:31:08.894255 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.910670 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:08 crc kubenswrapper[4910]: I1125 21:31:08.925687 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:08Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.266009 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.266199 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:31:13.266173075 +0000 UTC m=+28.728649397 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.266567 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.266622 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.266682 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.266725 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.266746 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.266779 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:13.26676785 +0000 UTC m=+28.729244182 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.266822 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.266855 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:13.266846042 +0000 UTC m=+28.729322414 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.266924 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.266946 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.266960 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.266988 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:13.266980046 +0000 UTC m=+28.729456368 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.267173 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.267260 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.267334 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:09 crc kubenswrapper[4910]: E1125 21:31:09.267434 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:13.267419627 +0000 UTC m=+28.729895939 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.454193 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe"} Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.458607 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204"} Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.458662 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978"} Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.458683 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515"} Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.458700 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" 
event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7"} Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.460209 4910 generic.go:334] "Generic (PLEG): container finished" podID="08d06fc8-cc2c-4b86-a391-f6cb96fad95c" containerID="7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318" exitCode=0 Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.460235 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" event={"ID":"08d06fc8-cc2c-4b86-a391-f6cb96fad95c","Type":"ContainerDied","Data":"7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318"} Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.474179 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"ru
nning\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' 
detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.491268 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.505811 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.519343 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.536619 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc 
kubenswrapper[4910]: I1125 21:31:09.552266 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.565520 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.580550 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.597437 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.615928 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z 
is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.627957 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.642524 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.655504 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.668779 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.680012 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.693096 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.704235 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.716764 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.730491 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.746149 
4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:
07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.765079 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z 
is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.779158 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\
\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.790867 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.802871 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.816659 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:09 crc kubenswrapper[4910]: I1125 21:31:09.833111 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:09Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.202916 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.202976 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:10 crc kubenswrapper[4910]: E1125 21:31:10.203060 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.203106 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:10 crc kubenswrapper[4910]: E1125 21:31:10.203297 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:10 crc kubenswrapper[4910]: E1125 21:31:10.203440 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.470289 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326"} Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.470365 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544"} Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.474416 4910 generic.go:334] "Generic (PLEG): container finished" podID="08d06fc8-cc2c-4b86-a391-f6cb96fad95c" containerID="4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47" exitCode=0 Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.474524 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" event={"ID":"08d06fc8-cc2c-4b86-a391-f6cb96fad95c","Type":"ContainerDied","Data":"4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47"} Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.506161 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.526544 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.542911 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.562758 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.585716 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.605038 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.620053 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.635286 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.653022 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.670392 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.686717 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.701938 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:10 crc kubenswrapper[4910]: I1125 21:31:10.715784 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:10Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.487021 4910 generic.go:334] "Generic (PLEG): container finished" podID="08d06fc8-cc2c-4b86-a391-f6cb96fad95c" containerID="8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874" exitCode=0 Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.487112 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" event={"ID":"08d06fc8-cc2c-4b86-a391-f6cb96fad95c","Type":"ContainerDied","Data":"8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874"} Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.508694 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.527581 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.543604 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.567068 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.583264 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.600372 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.618908 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.637160 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.655298 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.666632 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.687960 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.704549 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.718655 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.920635 4910 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.921786 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.924170 4910 scope.go:117] "RemoveContainer" containerID="2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c" Nov 25 21:31:11 crc kubenswrapper[4910]: E1125 21:31:11.924626 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.925483 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.925530 4910 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.925545 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.925687 4910 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.935153 4910 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.935390 4910 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.936404 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.936428 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.936436 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.936449 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.936459 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:11Z","lastTransitionTime":"2025-11-25T21:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:11 crc kubenswrapper[4910]: E1125 21:31:11.956227 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.959236 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.959276 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.959285 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.959298 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.959307 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:11Z","lastTransitionTime":"2025-11-25T21:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:11 crc kubenswrapper[4910]: E1125 21:31:11.971976 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.975649 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.975683 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.975691 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.975704 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.975715 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:11Z","lastTransitionTime":"2025-11-25T21:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:11 crc kubenswrapper[4910]: E1125 21:31:11.989008 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:11Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.993213 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.993284 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.993299 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.993318 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:11 crc kubenswrapper[4910]: I1125 21:31:11.993332 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:11Z","lastTransitionTime":"2025-11-25T21:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:12 crc kubenswrapper[4910]: E1125 21:31:12.005173 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.009059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.009105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.009119 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.009135 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.009145 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:12Z","lastTransitionTime":"2025-11-25T21:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:12 crc kubenswrapper[4910]: E1125 21:31:12.021786 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: E1125 21:31:12.021981 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.023647 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.023675 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.023684 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.023698 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.023708 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:12Z","lastTransitionTime":"2025-11-25T21:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.126042 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.126082 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.126094 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.126112 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.126125 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:12Z","lastTransitionTime":"2025-11-25T21:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.203642 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:12 crc kubenswrapper[4910]: E1125 21:31:12.203779 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.203660 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:12 crc kubenswrapper[4910]: E1125 21:31:12.203877 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.204270 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:12 crc kubenswrapper[4910]: E1125 21:31:12.204598 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.228108 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.228160 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.228172 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.228191 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.228204 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:12Z","lastTransitionTime":"2025-11-25T21:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.330700 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.330740 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.330748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.330761 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.330788 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:12Z","lastTransitionTime":"2025-11-25T21:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.433278 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.433324 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.433335 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.433352 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.433363 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:12Z","lastTransitionTime":"2025-11-25T21:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.499198 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e"} Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.503088 4910 generic.go:334] "Generic (PLEG): container finished" podID="08d06fc8-cc2c-4b86-a391-f6cb96fad95c" containerID="c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88" exitCode=0 Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.503150 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" event={"ID":"08d06fc8-cc2c-4b86-a391-f6cb96fad95c","Type":"ContainerDied","Data":"c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88"} Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.520433 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.535865 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.536275 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.536430 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.536535 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.536710 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:12Z","lastTransitionTime":"2025-11-25T21:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.540259 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.562183 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z 
is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.574014 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.588162 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c75
94a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.602417 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.614381 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.624740 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.636503 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.639938 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.639986 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.639998 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.640017 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.640030 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:12Z","lastTransitionTime":"2025-11-25T21:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.649654 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.662100 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.671668 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.682633 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:12Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.742372 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.742411 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.742420 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.742433 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.742451 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:12Z","lastTransitionTime":"2025-11-25T21:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.844953 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.844993 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.845003 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.845019 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.845028 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:12Z","lastTransitionTime":"2025-11-25T21:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.947537 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.947597 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.947618 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.947642 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:12 crc kubenswrapper[4910]: I1125 21:31:12.947671 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:12Z","lastTransitionTime":"2025-11-25T21:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.050142 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.050193 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.050204 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.050220 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.050232 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:13Z","lastTransitionTime":"2025-11-25T21:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.152299 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.152352 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.152363 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.152380 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.152391 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:13Z","lastTransitionTime":"2025-11-25T21:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.255009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.255070 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.255091 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.255121 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.255141 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:13Z","lastTransitionTime":"2025-11-25T21:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.302883 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.303184 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.303303 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-25 21:31:21.30322221 +0000 UTC m=+36.765698572 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.303403 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.303483 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.303590 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:21.303561708 +0000 UTC m=+36.766038210 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.303702 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.303711 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.303744 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.303770 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.303794 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.303815 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.303871 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:21.303854196 +0000 UTC m=+36.766330558 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.303490 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.303913 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:21.303899177 +0000 UTC m=+36.766375539 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.304075 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.304167 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:13 crc kubenswrapper[4910]: E1125 21:31:13.304222 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:21.304198054 +0000 UTC m=+36.766674406 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.358077 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.358122 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.358130 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.358145 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.358156 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:13Z","lastTransitionTime":"2025-11-25T21:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.460854 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.460908 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.460920 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.460939 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.460950 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:13Z","lastTransitionTime":"2025-11-25T21:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.511571 4910 generic.go:334] "Generic (PLEG): container finished" podID="08d06fc8-cc2c-4b86-a391-f6cb96fad95c" containerID="84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9" exitCode=0 Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.511600 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" event={"ID":"08d06fc8-cc2c-4b86-a391-f6cb96fad95c","Type":"ContainerDied","Data":"84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9"} Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.526534 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and 
discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.551734 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.565152 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.565177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.565188 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.565209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.565224 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:13Z","lastTransitionTime":"2025-11-25T21:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.568432 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.583728 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.602329 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.615842 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.631107 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.647539 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.662564 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.673346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.673904 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.674015 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.674091 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.674127 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:13Z","lastTransitionTime":"2025-11-25T21:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.678364 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.701426 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.716698 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.736436 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.776586 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.776620 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.776630 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.776649 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.776661 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:13Z","lastTransitionTime":"2025-11-25T21:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.880198 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.880305 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.880325 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.880784 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.880845 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:13Z","lastTransitionTime":"2025-11-25T21:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.984443 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.984494 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.984506 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.984525 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:13 crc kubenswrapper[4910]: I1125 21:31:13.984539 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:13Z","lastTransitionTime":"2025-11-25T21:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.087113 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.087174 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.087191 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.087220 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.087238 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:14Z","lastTransitionTime":"2025-11-25T21:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.133733 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-rffgq"] Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.134338 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-rffgq" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.137485 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.137814 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.138045 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.139201 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.160757 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.176117 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 
2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.189317 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.190531 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.190566 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.190574 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.190588 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.190600 4910 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:14Z","lastTransitionTime":"2025-11-25T21:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.203016 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.203108 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.203103 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:14 crc kubenswrapper[4910]: E1125 21:31:14.203186 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:14 crc kubenswrapper[4910]: E1125 21:31:14.203427 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:14 crc kubenswrapper[4910]: E1125 21:31:14.203557 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.204014 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.217367 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.239354 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.254575 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.269478 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.283795 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 
21:31:14.292817 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.292850 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.292858 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.292874 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.292886 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:14Z","lastTransitionTime":"2025-11-25T21:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.303795 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.313563 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2xbf\" (UniqueName: \"kubernetes.io/projected/6a03281c-d203-4d00-a8d8-c6ac28edd03b-kube-api-access-s2xbf\") pod \"node-ca-rffgq\" (UID: \"6a03281c-d203-4d00-a8d8-c6ac28edd03b\") " pod="openshift-image-registry/node-ca-rffgq" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.313623 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6a03281c-d203-4d00-a8d8-c6ac28edd03b-host\") pod \"node-ca-rffgq\" (UID: \"6a03281c-d203-4d00-a8d8-c6ac28edd03b\") " pod="openshift-image-registry/node-ca-rffgq" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.313801 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/6a03281c-d203-4d00-a8d8-c6ac28edd03b-serviceca\") pod \"node-ca-rffgq\" (UID: \"6a03281c-d203-4d00-a8d8-c6ac28edd03b\") " pod="openshift-image-registry/node-ca-rffgq" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.318235 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-1
1-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.332914 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.353295 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.374049 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.396210 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.396302 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.396319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.396359 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.396372 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:14Z","lastTransitionTime":"2025-11-25T21:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.415141 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6a03281c-d203-4d00-a8d8-c6ac28edd03b-host\") pod \"node-ca-rffgq\" (UID: \"6a03281c-d203-4d00-a8d8-c6ac28edd03b\") " pod="openshift-image-registry/node-ca-rffgq" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.415359 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/6a03281c-d203-4d00-a8d8-c6ac28edd03b-serviceca\") pod \"node-ca-rffgq\" (UID: \"6a03281c-d203-4d00-a8d8-c6ac28edd03b\") " pod="openshift-image-registry/node-ca-rffgq" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.415383 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6a03281c-d203-4d00-a8d8-c6ac28edd03b-host\") pod \"node-ca-rffgq\" (UID: \"6a03281c-d203-4d00-a8d8-c6ac28edd03b\") " pod="openshift-image-registry/node-ca-rffgq" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.415458 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2xbf\" (UniqueName: \"kubernetes.io/projected/6a03281c-d203-4d00-a8d8-c6ac28edd03b-kube-api-access-s2xbf\") pod \"node-ca-rffgq\" (UID: \"6a03281c-d203-4d00-a8d8-c6ac28edd03b\") " pod="openshift-image-registry/node-ca-rffgq" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.417581 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/6a03281c-d203-4d00-a8d8-c6ac28edd03b-serviceca\") pod \"node-ca-rffgq\" (UID: \"6a03281c-d203-4d00-a8d8-c6ac28edd03b\") " pod="openshift-image-registry/node-ca-rffgq" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.438393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2xbf\" (UniqueName: \"kubernetes.io/projected/6a03281c-d203-4d00-a8d8-c6ac28edd03b-kube-api-access-s2xbf\") pod \"node-ca-rffgq\" (UID: \"6a03281c-d203-4d00-a8d8-c6ac28edd03b\") " pod="openshift-image-registry/node-ca-rffgq" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.455592 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-rffgq" Nov 25 21:31:14 crc kubenswrapper[4910]: W1125 21:31:14.476629 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a03281c_d203_4d00_a8d8_c6ac28edd03b.slice/crio-7b7fbc09f782e0dbb793e8f17622f26e4548d647a5ae547bb949c655c83ef615 WatchSource:0}: Error finding container 7b7fbc09f782e0dbb793e8f17622f26e4548d647a5ae547bb949c655c83ef615: Status 404 returned error can't find the container with id 7b7fbc09f782e0dbb793e8f17622f26e4548d647a5ae547bb949c655c83ef615 Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.499932 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.499987 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.500002 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.500028 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.500043 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:14Z","lastTransitionTime":"2025-11-25T21:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.517574 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-rffgq" event={"ID":"6a03281c-d203-4d00-a8d8-c6ac28edd03b","Type":"ContainerStarted","Data":"7b7fbc09f782e0dbb793e8f17622f26e4548d647a5ae547bb949c655c83ef615"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.526911 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"3df9f4fca222fc835eaed18778a61fe48c5484672b55a0a75809741abe796943"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.527290 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.534714 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" event={"ID":"08d06fc8-cc2c-4b86-a391-f6cb96fad95c","Type":"ContainerStarted","Data":"a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.544227 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.560451 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.561938 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.574515 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.590319 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.603365 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.603413 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.603426 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.603445 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.603460 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:14Z","lastTransitionTime":"2025-11-25T21:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.610493 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3df9f4fca222fc835eaed18778a61fe48c548467
2b55a0a75809741abe796943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.625581 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.643309 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.658154 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.673351 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.690997 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.707564 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.708202 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.708265 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.708279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.708305 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.708323 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:14Z","lastTransitionTime":"2025-11-25T21:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.729951 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.747828 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.763919 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.791782 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.813914 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.813957 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.813969 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.813987 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.813999 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:14Z","lastTransitionTime":"2025-11-25T21:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.836862 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.855227 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.869357 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.882816 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.930482 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.930530 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.930544 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.930566 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.930578 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:14Z","lastTransitionTime":"2025-11-25T21:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.934476 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.947168 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.961994 4910 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.962571 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/
run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3df9f4fca222fc835eaed18778a61fe48c5484672b55a0a75809741abe796943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\
\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Patch \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ovn-kubernetes/pods/ovnkube-node-cvj2j/status\": read tcp 38.102.83.142:54622->38.102.83.142:6443: use of closed network connection" Nov 25 21:31:14 crc kubenswrapper[4910]: W1125 21:31:14.963211 4910 reflector.go:484] 
object-"openshift-image-registry"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Nov 25 21:31:14 crc kubenswrapper[4910]: W1125 21:31:14.963369 4910 reflector.go:484] object-"openshift-image-registry"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Nov 25 21:31:14 crc kubenswrapper[4910]: W1125 21:31:14.963731 4910 reflector.go:484] object-"openshift-image-registry"/"node-ca-dockercfg-4777p": watch of *v1.Secret ended with: very short watch: object-"openshift-image-registry"/"node-ca-dockercfg-4777p": Unexpected watch close - watch lasted less than a second and no items received Nov 25 21:31:14 crc kubenswrapper[4910]: W1125 21:31:14.963925 4910 reflector.go:484] object-"openshift-image-registry"/"image-registry-certificates": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-image-registry"/"image-registry-certificates": Unexpected watch close - watch lasted less than a second and no items received Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.987135 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:14 crc kubenswrapper[4910]: I1125 21:31:14.998785 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:14Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.010756 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.025861 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.032484 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.032510 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.032519 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.032532 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.032542 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:15Z","lastTransitionTime":"2025-11-25T21:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.039690 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.052047 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.135653 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.135695 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.135723 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.135741 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.135754 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:15Z","lastTransitionTime":"2025-11-25T21:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.217414 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.231476 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.238284 4910 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.238321 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.238344 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.238386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.238395 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:15Z","lastTransitionTime":"2025-11-25T21:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.243522 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.258951 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.272033 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.285622 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.305683 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"re
cursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cr
i-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3df9f4fca222fc835eaed18778a61fe48c5484672b55a0a75809741abe796943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"
mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.315206 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.334425 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.340183 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.340211 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.340219 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.340233 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.340255 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:15Z","lastTransitionTime":"2025-11-25T21:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.353635 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.366274 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc
/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.379604 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.391427 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.406294 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.442055 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.442098 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.442111 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.442127 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.442137 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:15Z","lastTransitionTime":"2025-11-25T21:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.539643 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-rffgq" event={"ID":"6a03281c-d203-4d00-a8d8-c6ac28edd03b","Type":"ContainerStarted","Data":"719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd"} Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.539744 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.540285 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.544705 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.544779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.544802 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.544829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.544850 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:15Z","lastTransitionTime":"2025-11-25T21:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.555121 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.561739 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.569236 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},
{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.593462 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3df9f4fca222fc835eaed18778a61fe48c548467
2b55a0a75809741abe796943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.605819 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.619584 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.636982 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.647173 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.647239 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.647267 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.647288 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.647302 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:15Z","lastTransitionTime":"2025-11-25T21:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.650820 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 
21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.662505 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.672844 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.685890 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.700220 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.718487 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.731001 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.740983 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.749009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.749048 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.749059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.749078 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.749090 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:15Z","lastTransitionTime":"2025-11-25T21:31:15Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.752724 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.766134 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.775760 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.785615 4910 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.788376 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.798307 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.808820 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.824355 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.845650 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05587
6a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3df9f4fca222fc835eaed18778a61fe48c5484672b55a0a75809741abe796943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.850846 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 
25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.850868 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.850875 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.850889 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.850901 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:15Z","lastTransitionTime":"2025-11-25T21:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.857644 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.869407 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.879456 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.891277 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.902292 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.912203 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.953030 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.953357 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.953381 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.953388 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.953401 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:15 crc kubenswrapper[4910]: I1125 21:31:15.953411 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:15Z","lastTransitionTime":"2025-11-25T21:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.055934 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.055967 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.055978 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.055994 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.056007 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:16Z","lastTransitionTime":"2025-11-25T21:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.158383 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.158413 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.158421 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.158436 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.158446 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:16Z","lastTransitionTime":"2025-11-25T21:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.203175 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.203175 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:16 crc kubenswrapper[4910]: E1125 21:31:16.203361 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.203391 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:16 crc kubenswrapper[4910]: E1125 21:31:16.203439 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:16 crc kubenswrapper[4910]: E1125 21:31:16.203527 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.260394 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.260501 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.260517 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.260538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.260553 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:16Z","lastTransitionTime":"2025-11-25T21:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.363416 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.363456 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.363467 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.363482 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.363491 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:16Z","lastTransitionTime":"2025-11-25T21:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.466571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.466597 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.466605 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.466619 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.466628 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:16Z","lastTransitionTime":"2025-11-25T21:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.524128 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.542827 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.547789 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.569662 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.569703 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.569713 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.569731 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.569740 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:16Z","lastTransitionTime":"2025-11-25T21:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.671675 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.671709 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.671718 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.671733 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.671743 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:16Z","lastTransitionTime":"2025-11-25T21:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.773919 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.773955 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.773974 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.773989 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.774000 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:16Z","lastTransitionTime":"2025-11-25T21:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.875748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.875798 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.875809 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.875829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.875843 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:16Z","lastTransitionTime":"2025-11-25T21:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.978535 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.978573 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.978582 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.978596 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:16 crc kubenswrapper[4910]: I1125 21:31:16.978605 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:16Z","lastTransitionTime":"2025-11-25T21:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.081370 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.081410 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.081424 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.081443 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.081454 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:17Z","lastTransitionTime":"2025-11-25T21:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.184175 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.184214 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.184223 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.184255 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.184264 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:17Z","lastTransitionTime":"2025-11-25T21:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.287076 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.287138 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.287161 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.287414 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.287438 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:17Z","lastTransitionTime":"2025-11-25T21:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.389800 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.389835 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.389845 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.389860 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.389870 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:17Z","lastTransitionTime":"2025-11-25T21:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.492798 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.493077 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.493259 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.493324 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.493385 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:17Z","lastTransitionTime":"2025-11-25T21:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.547110 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/0.log" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.549993 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="3df9f4fca222fc835eaed18778a61fe48c5484672b55a0a75809741abe796943" exitCode=1 Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.550031 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"3df9f4fca222fc835eaed18778a61fe48c5484672b55a0a75809741abe796943"} Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.550660 4910 scope.go:117] "RemoveContainer" containerID="3df9f4fca222fc835eaed18778a61fe48c5484672b55a0a75809741abe796943" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.565736 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.576552 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.593083 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.595569 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.595601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.595622 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.595639 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.595650 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:17Z","lastTransitionTime":"2025-11-25T21:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.604593 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.617429 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.626294 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.637584 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.650319 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.664914 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.676001 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.691642 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.698463 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.698493 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.698502 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.698517 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.698527 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:17Z","lastTransitionTime":"2025-11-25T21:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.709595 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.732125 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3df9f4fca222fc835eaed18778a61fe48c548467
2b55a0a75809741abe796943\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3df9f4fca222fc835eaed18778a61fe48c5484672b55a0a75809741abe796943\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:17Z\\\",\\\"message\\\":\\\"controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:17.062130 6179 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:17.062263 6179 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:17.062313 6179 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 21:31:17.062347 6179 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 21:31:17.062377 6179 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 21:31:17.062388 6179 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:17.062413 6179 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 21:31:17.062432 6179 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 21:31:17.062461 6179 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 21:31:17.062475 6179 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 21:31:17.062507 6179 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:17.062514 6179 factory.go:656] Stopping watch factory\\\\nI1125 21:31:17.062574 6179 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 21:31:17.062579 6179 ovnkube.go:599] Stopped ovnkube\\\\nI1125 21:31:17.062526 6179 handler.go:208] Removed *v1.Node event handler 
7\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.745214 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192
.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:17Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.800517 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.800554 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.800586 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.800601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.800610 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:17Z","lastTransitionTime":"2025-11-25T21:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.903273 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.903302 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.903311 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.903325 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:17 crc kubenswrapper[4910]: I1125 21:31:17.903334 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:17Z","lastTransitionTime":"2025-11-25T21:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.005745 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.005821 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.005829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.005843 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.005853 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:18Z","lastTransitionTime":"2025-11-25T21:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.108552 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.108595 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.108604 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.108620 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.108632 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:18Z","lastTransitionTime":"2025-11-25T21:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.203634 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.203684 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:18 crc kubenswrapper[4910]: E1125 21:31:18.203783 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:18 crc kubenswrapper[4910]: E1125 21:31:18.203931 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.204132 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:18 crc kubenswrapper[4910]: E1125 21:31:18.204323 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.211177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.211214 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.211222 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.211237 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.211280 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:18Z","lastTransitionTime":"2025-11-25T21:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.315008 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.315056 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.315067 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.315082 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.315092 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:18Z","lastTransitionTime":"2025-11-25T21:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.417933 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.417983 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.417992 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.418008 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.418021 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:18Z","lastTransitionTime":"2025-11-25T21:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.521081 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.521345 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.521359 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.521373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.521381 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:18Z","lastTransitionTime":"2025-11-25T21:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.556388 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/1.log" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.557331 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/0.log" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.562172 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a" exitCode=1 Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.562237 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a"} Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.562341 4910 scope.go:117] "RemoveContainer" containerID="3df9f4fca222fc835eaed18778a61fe48c5484672b55a0a75809741abe796943" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.563537 4910 scope.go:117] "RemoveContainer" containerID="b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a" Nov 25 21:31:18 crc kubenswrapper[4910]: E1125 21:31:18.563903 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.581370 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.601785 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.619780 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.624923 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.625122 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.625184 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.625333 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.625398 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:18Z","lastTransitionTime":"2025-11-25T21:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.642356 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.662346 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2
baeff9e60463787a79fff27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3df9f4fca222fc835eaed18778a61fe48c5484672b55a0a75809741abe796943\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:17Z\\\",\\\"message\\\":\\\"controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:17.062130 6179 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:17.062263 6179 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:17.062313 6179 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 21:31:17.062347 6179 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 21:31:17.062377 6179 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 21:31:17.062388 6179 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:17.062413 6179 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 21:31:17.062432 6179 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 21:31:17.062461 6179 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 21:31:17.062475 6179 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 21:31:17.062507 6179 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:17.062514 6179 factory.go:656] Stopping watch factory\\\\nI1125 21:31:17.062574 6179 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 21:31:17.062579 6179 ovnkube.go:599] Stopped ovnkube\\\\nI1125 21:31:17.062526 6179 handler.go:208] Removed *v1.Node event handler 7\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:18Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429340 6330 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429448 6330 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429528 6330 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429588 6330 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429648 6330 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429719 6330 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429827 6330 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429996 6330 reflector.go:311] Stopping reflector 
*v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9f
ffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.675792 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.690763 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.706194 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.719579 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.728072 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.728112 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.728122 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.728138 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.728147 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:18Z","lastTransitionTime":"2025-11-25T21:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.735786 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.749935 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.759551 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.771914 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.787406 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:18Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.831090 4910 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.831133 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.831144 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.831160 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.831170 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:18Z","lastTransitionTime":"2025-11-25T21:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.934713 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.934798 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.934817 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.934848 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:18 crc kubenswrapper[4910]: I1125 21:31:18.934874 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:18Z","lastTransitionTime":"2025-11-25T21:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.041932 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.042022 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.042056 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.042105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.042127 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:19Z","lastTransitionTime":"2025-11-25T21:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.145844 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.145899 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.145911 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.145930 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.145945 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:19Z","lastTransitionTime":"2025-11-25T21:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.248490 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.248576 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.248597 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.248629 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.248653 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:19Z","lastTransitionTime":"2025-11-25T21:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.351373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.351483 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.351513 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.351553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.351579 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:19Z","lastTransitionTime":"2025-11-25T21:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.377227 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4"] Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.377702 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.381409 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.384314 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.398769 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.416286 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.429057 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.447944 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.454271 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.454299 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.454312 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.454332 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.454344 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:19Z","lastTransitionTime":"2025-11-25T21:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.465898 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.471792 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ml2qb\" (UniqueName: \"kubernetes.io/projected/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-kube-api-access-ml2qb\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.471974 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.472044 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: 
\"kubernetes.io/secret/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.472102 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.483658 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\
"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388
e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\"
,\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.498317 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.510920 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.522379 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.537527 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.550817 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\
\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.557369 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.557405 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.557418 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.557437 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.557451 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:19Z","lastTransitionTime":"2025-11-25T21:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.566178 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/1.log" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.569102 4910 scope.go:117] "RemoveContainer" containerID="b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a" Nov 25 21:31:19 crc kubenswrapper[4910]: E1125 21:31:19.569277 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.573322 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.573352 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.573398 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ml2qb\" (UniqueName: \"kubernetes.io/projected/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-kube-api-access-ml2qb\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.573432 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.573762 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3df9f4fca222fc835eaed18778a61fe48c5484672b55a0a75809741abe796943\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:17Z\\\",\\\"message\\\":\\\"controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:17.062130 6179 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:17.062263 6179 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:17.062313 6179 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI1125 21:31:17.062347 6179 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI1125 21:31:17.062377 6179 handler.go:208] Removed *v1.Pod event handler 3\\\\nI1125 21:31:17.062388 6179 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:17.062413 6179 handler.go:208] Removed *v1.Pod event handler 6\\\\nI1125 21:31:17.062432 6179 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1125 21:31:17.062461 6179 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1125 21:31:17.062475 6179 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1125 21:31:17.062507 6179 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:17.062514 6179 factory.go:656] Stopping watch factory\\\\nI1125 21:31:17.062574 6179 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1125 21:31:17.062579 6179 ovnkube.go:599] Stopped ovnkube\\\\nI1125 21:31:17.062526 6179 handler.go:208] Removed *v1.Node event handler 7\\\\nI11\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:18Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429340 
6330 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429448 6330 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429528 6330 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429588 6330 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429648 6330 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429719 6330 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429827 6330 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429996 6330 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.573954 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.574342 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-env-overrides\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.583475 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.587071 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.594444 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ml2qb\" (UniqueName: \"kubernetes.io/projected/4d503d4a-1acf-4d36-a9fc-a33c8255e4ff-kube-api-access-ml2qb\") pod \"ovnkube-control-plane-749d76644c-wp6p4\" (UID: \"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: 
I1125 21:31:19.603142 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.618865 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.629168 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.638360 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.649687 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.659568 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.659596 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.659603 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.659626 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.659635 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:19Z","lastTransitionTime":"2025-11-25T21:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.663301 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.676348 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.691221 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.693529 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.706262 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\
\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.729085 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2
baeff9e60463787a79fff27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:18Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429340 6330 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429448 6330 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429528 6330 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429588 6330 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429648 6330 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429719 6330 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429827 6330 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429996 6330 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.738551 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.750464 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.765691 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.765738 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.765749 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.765768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.765782 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:19Z","lastTransitionTime":"2025-11-25T21:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.769712 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.780366 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.792849 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.802386 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.813577 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:19Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.868493 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.868530 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.868540 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.868555 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.868564 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:19Z","lastTransitionTime":"2025-11-25T21:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.976712 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.976761 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.976774 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.976791 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:19 crc kubenswrapper[4910]: I1125 21:31:19.976805 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:19Z","lastTransitionTime":"2025-11-25T21:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.079403 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.079439 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.079447 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.079460 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.079471 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:20Z","lastTransitionTime":"2025-11-25T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.181824 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.181856 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.181865 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.181881 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.181890 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:20Z","lastTransitionTime":"2025-11-25T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.203708 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.203750 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.203774 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:20 crc kubenswrapper[4910]: E1125 21:31:20.203867 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:20 crc kubenswrapper[4910]: E1125 21:31:20.203999 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:20 crc kubenswrapper[4910]: E1125 21:31:20.204119 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.284235 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.284304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.284317 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.284333 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.284343 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:20Z","lastTransitionTime":"2025-11-25T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.386325 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.386375 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.386388 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.386406 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.386420 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:20Z","lastTransitionTime":"2025-11-25T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.489971 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.490011 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.490019 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.490034 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.490044 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:20Z","lastTransitionTime":"2025-11-25T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.577413 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" event={"ID":"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff","Type":"ContainerStarted","Data":"c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.577475 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" event={"ID":"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff","Type":"ContainerStarted","Data":"27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.577492 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" event={"ID":"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff","Type":"ContainerStarted","Data":"52381d469853c3aeaa1bc18e5911baa7d3d06e7176036488cd6befb29e3d3e7b"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.593386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.593444 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.593462 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.593486 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.593502 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:20Z","lastTransitionTime":"2025-11-25T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.601059 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.620650 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.638106 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.652677 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.673203 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.691425 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.696656 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.696736 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.696758 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.696792 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.696812 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:20Z","lastTransitionTime":"2025-11-25T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.707673 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.722391 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.735570 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.750871 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.763046 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\
\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.775968 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.792664 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.800508 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.800551 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.800564 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.800587 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.800602 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:20Z","lastTransitionTime":"2025-11-25T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.809087 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"
},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.842613 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2
baeff9e60463787a79fff27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:18Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429340 6330 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429448 6330 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429528 6330 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429588 6330 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429648 6330 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429719 6330 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429827 6330 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429996 6330 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.885016 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-m4q5p"] Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.885913 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:20 crc kubenswrapper[4910]: E1125 21:31:20.886038 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.903760 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.903828 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.903845 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.903874 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.903875 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.903894 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:20Z","lastTransitionTime":"2025-11-25T21:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.922141 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.941874 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.969062 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05587
6a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:18Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429340 6330 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429448 6330 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429528 6330 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429588 6330 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429648 6330 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429719 6330 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429827 6330 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429996 6330 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secret
s/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.984894 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:20Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.987499 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:20 crc kubenswrapper[4910]: I1125 21:31:20.987566 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c4sn\" (UniqueName: \"kubernetes.io/projected/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-kube-api-access-5c4sn\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.006349 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4
e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:21Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.007081 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.007152 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.007172 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.007204 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.007223 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:21Z","lastTransitionTime":"2025-11-25T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.023232 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:21Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.043101 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:21Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.064058 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:21Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.082102 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:21Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.088567 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.088705 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c4sn\" (UniqueName: \"kubernetes.io/projected/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-kube-api-access-5c4sn\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.088832 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.088983 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs podName:72d787b6-8fd2-4a83-9e8f-2654fdad81c9 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:21.58894006 +0000 UTC m=+37.051416412 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs") pod "network-metrics-daemon-m4q5p" (UID: "72d787b6-8fd2-4a83-9e8f-2654fdad81c9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.101202 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"
,\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:21Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.109815 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.109906 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.109933 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.109973 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 
25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.110004 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:21Z","lastTransitionTime":"2025-11-25T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.116759 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"
hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:21Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.122345 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c4sn\" (UniqueName: \"kubernetes.io/projected/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-kube-api-access-5c4sn\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.138907 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:21Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.149408 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:21Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.160893 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:21Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.176050 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:21Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.211877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.211930 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.211941 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.211959 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.211972 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:21Z","lastTransitionTime":"2025-11-25T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.315609 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.315683 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.315702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.315728 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.315745 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:21Z","lastTransitionTime":"2025-11-25T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.392733 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.392992 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393056 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:31:37.392995987 +0000 UTC m=+52.855472349 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.393160 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393196 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.393232 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393350 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:37.393315935 +0000 UTC m=+52.855792437 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.393446 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393515 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393553 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393569 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393587 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393630 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393642 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:37.393616203 +0000 UTC m=+52.856092575 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393574 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393659 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393718 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:37.393698525 +0000 UTC m=+52.856174897 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.393758 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:37.393736506 +0000 UTC m=+52.856213068 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.419521 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.419575 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.419586 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.419605 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.419619 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:21Z","lastTransitionTime":"2025-11-25T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.522009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.522057 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.522070 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.522088 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.522100 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:21Z","lastTransitionTime":"2025-11-25T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.595664 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.595877 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: E1125 21:31:21.595967 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs podName:72d787b6-8fd2-4a83-9e8f-2654fdad81c9 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:22.595942991 +0000 UTC m=+38.058419323 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs") pod "network-metrics-daemon-m4q5p" (UID: "72d787b6-8fd2-4a83-9e8f-2654fdad81c9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.625102 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.625191 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.625214 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.625282 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.625313 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:21Z","lastTransitionTime":"2025-11-25T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.727937 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.728020 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.728040 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.728070 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.728088 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:21Z","lastTransitionTime":"2025-11-25T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.831033 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.831450 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.831501 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.831529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.831549 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:21Z","lastTransitionTime":"2025-11-25T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.934532 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.934585 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.934601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.934622 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:21 crc kubenswrapper[4910]: I1125 21:31:21.934638 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:21Z","lastTransitionTime":"2025-11-25T21:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.028457 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.028495 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.028505 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.028519 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.028528 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.042699 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 
2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.049335 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.049379 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.049392 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.049410 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.049421 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.060844 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 
2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.064631 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.064682 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.064694 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.064707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.064715 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.075848 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 
2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.080839 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.080893 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.080902 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.080916 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.080926 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.093196 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 
2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.096990 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.097039 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.097422 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.097766 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.097787 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.117868 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 
2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.117988 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.120622 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.120788 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.120805 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.120831 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.120849 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.203299 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.203349 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.203450 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.203461 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.203495 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.203745 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.204061 4910 scope.go:117] "RemoveContainer" containerID="2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c" Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.204128 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.204281 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.227454 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.227529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.227553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.227578 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.227597 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.330635 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.330690 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.330711 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.330741 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.330761 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.435275 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.435366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.435391 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.435420 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.435440 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.538304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.538348 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.538362 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.538379 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.538391 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.588409 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.590963 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a"} Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.591291 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.609332 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.623652 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.634968 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.635118 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.635235 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:22 crc kubenswrapper[4910]: E1125 21:31:22.635397 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs podName:72d787b6-8fd2-4a83-9e8f-2654fdad81c9 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:24.635362239 +0000 UTC m=+40.097838601 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs") pod "network-metrics-daemon-m4q5p" (UID: "72d787b6-8fd2-4a83-9e8f-2654fdad81c9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.641309 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.641372 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.641393 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.641422 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.641442 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.652133 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af
0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.666371 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cn
i-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.695819 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2
baeff9e60463787a79fff27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:18Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429340 6330 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429448 6330 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429528 6330 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429588 6330 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429648 6330 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429719 6330 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429827 6330 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429996 6330 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.717048 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.737190 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.743867 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.743938 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.743952 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.743968 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.743981 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.755722 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.773953 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.796722 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.811125 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.824888 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.836623 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.851345 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.853651 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.853706 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.853718 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.853735 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.853747 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.863736 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:22Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.956130 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.956171 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.956180 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.956194 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:22 crc kubenswrapper[4910]: I1125 21:31:22.956287 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:22Z","lastTransitionTime":"2025-11-25T21:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.059513 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.059548 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.059556 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.059570 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.059579 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:23Z","lastTransitionTime":"2025-11-25T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.162562 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.162596 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.162604 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.162617 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.162626 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:23Z","lastTransitionTime":"2025-11-25T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.265405 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.265459 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.265471 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.265490 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.265502 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:23Z","lastTransitionTime":"2025-11-25T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.367658 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.367711 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.367723 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.367743 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.367757 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:23Z","lastTransitionTime":"2025-11-25T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.470549 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.470590 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.470602 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.470647 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.470663 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:23Z","lastTransitionTime":"2025-11-25T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.573823 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.573860 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.573870 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.573884 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.573894 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:23Z","lastTransitionTime":"2025-11-25T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.676722 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.676770 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.676782 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.676801 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.676814 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:23Z","lastTransitionTime":"2025-11-25T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.779142 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.779186 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.779195 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.779211 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.779225 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:23Z","lastTransitionTime":"2025-11-25T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.882448 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.882507 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.882528 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.882557 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.882582 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:23Z","lastTransitionTime":"2025-11-25T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.986006 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.986076 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.986089 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.986111 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:23 crc kubenswrapper[4910]: I1125 21:31:23.986126 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:23Z","lastTransitionTime":"2025-11-25T21:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.089348 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.089607 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.089634 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.089668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.089693 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:24Z","lastTransitionTime":"2025-11-25T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.193129 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.193205 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.193224 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.193281 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.193303 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:24Z","lastTransitionTime":"2025-11-25T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.203976 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.204012 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.204007 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.204059 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:24 crc kubenswrapper[4910]: E1125 21:31:24.204234 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:24 crc kubenswrapper[4910]: E1125 21:31:24.204350 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:24 crc kubenswrapper[4910]: E1125 21:31:24.204500 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:24 crc kubenswrapper[4910]: E1125 21:31:24.204676 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.296946 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.297058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.297117 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.297151 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.297208 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:24Z","lastTransitionTime":"2025-11-25T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.400166 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.400278 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.400301 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.400331 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.400354 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:24Z","lastTransitionTime":"2025-11-25T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.504393 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.504512 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.504543 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.504579 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.504605 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:24Z","lastTransitionTime":"2025-11-25T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.608348 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.608474 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.608495 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.608523 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.608544 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:24Z","lastTransitionTime":"2025-11-25T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.657617 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:24 crc kubenswrapper[4910]: E1125 21:31:24.657956 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:24 crc kubenswrapper[4910]: E1125 21:31:24.658091 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs podName:72d787b6-8fd2-4a83-9e8f-2654fdad81c9 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:28.658057808 +0000 UTC m=+44.120534170 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs") pod "network-metrics-daemon-m4q5p" (UID: "72d787b6-8fd2-4a83-9e8f-2654fdad81c9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.711664 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.711741 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.711768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.711834 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.711868 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:24Z","lastTransitionTime":"2025-11-25T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.816046 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.816115 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.816133 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.816166 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.816186 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:24Z","lastTransitionTime":"2025-11-25T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.919271 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.919397 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.919424 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.919477 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:24 crc kubenswrapper[4910]: I1125 21:31:24.919505 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:24Z","lastTransitionTime":"2025-11-25T21:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.023917 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.023975 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.023994 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.024014 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.024027 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:25Z","lastTransitionTime":"2025-11-25T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.126958 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.127009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.127026 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.127050 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.127066 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:25Z","lastTransitionTime":"2025-11-25T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.221943 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.229984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.230023 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.230039 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.230059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.230075 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:25Z","lastTransitionTime":"2025-11-25T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.236744 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.250839 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.267210 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.288292 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.303971 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.321764 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.334672 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.334716 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.334730 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.334749 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.334766 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:25Z","lastTransitionTime":"2025-11-25T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.338549 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.354262 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.379690 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05587
6a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:18Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429340 6330 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429448 6330 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429528 6330 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429588 6330 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429648 6330 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429719 6330 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429827 6330 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429996 6330 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secret
s/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.398527 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.414802 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.431976 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.437046 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.437081 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.437092 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.437109 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.437123 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:25Z","lastTransitionTime":"2025-11-25T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.449936 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.467655 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.483013 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:25Z is after 2025-08-24T17:21:41Z" Nov 25 
21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.539458 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.539515 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.539527 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.539549 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.539563 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:25Z","lastTransitionTime":"2025-11-25T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.642065 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.642309 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.642409 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.642481 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.642546 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:25Z","lastTransitionTime":"2025-11-25T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.745647 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.746317 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.746339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.746360 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.746373 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:25Z","lastTransitionTime":"2025-11-25T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.848883 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.848934 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.848950 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.848971 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.848984 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:25Z","lastTransitionTime":"2025-11-25T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.951539 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.951848 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.951951 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.952056 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:25 crc kubenswrapper[4910]: I1125 21:31:25.952152 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:25Z","lastTransitionTime":"2025-11-25T21:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.055058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.055139 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.055162 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.055189 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.055207 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:26Z","lastTransitionTime":"2025-11-25T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.159346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.159399 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.159410 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.159429 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.159443 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:26Z","lastTransitionTime":"2025-11-25T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.203205 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.203307 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.203311 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:26 crc kubenswrapper[4910]: E1125 21:31:26.203397 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.203445 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:26 crc kubenswrapper[4910]: E1125 21:31:26.203583 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:26 crc kubenswrapper[4910]: E1125 21:31:26.203719 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:26 crc kubenswrapper[4910]: E1125 21:31:26.203979 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.262062 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.262101 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.262112 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.262127 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.262136 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:26Z","lastTransitionTime":"2025-11-25T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.364927 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.364982 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.364996 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.365021 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.365036 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:26Z","lastTransitionTime":"2025-11-25T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.467497 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.467574 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.467589 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.467636 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.467651 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:26Z","lastTransitionTime":"2025-11-25T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.570975 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.571046 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.571064 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.571091 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.571109 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:26Z","lastTransitionTime":"2025-11-25T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.673874 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.673979 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.674009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.674051 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.674075 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:26Z","lastTransitionTime":"2025-11-25T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.777922 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.778005 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.778025 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.778058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.778088 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:26Z","lastTransitionTime":"2025-11-25T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.881135 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.881477 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.881577 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.881683 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.881784 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:26Z","lastTransitionTime":"2025-11-25T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.985659 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.986014 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.986138 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.986276 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:26 crc kubenswrapper[4910]: I1125 21:31:26.986408 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:26Z","lastTransitionTime":"2025-11-25T21:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.089739 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.089821 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.089841 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.089871 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.089891 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:27Z","lastTransitionTime":"2025-11-25T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.193143 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.193213 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.193234 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.193289 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.193310 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:27Z","lastTransitionTime":"2025-11-25T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.296213 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.296293 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.296306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.296328 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.296341 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:27Z","lastTransitionTime":"2025-11-25T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.399352 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.399399 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.399413 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.399433 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.399447 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:27Z","lastTransitionTime":"2025-11-25T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.503362 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.503443 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.503477 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.503509 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.503531 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:27Z","lastTransitionTime":"2025-11-25T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.606638 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.606701 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.606720 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.606746 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.606765 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:27Z","lastTransitionTime":"2025-11-25T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.710074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.710120 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.710132 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.710148 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.710160 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:27Z","lastTransitionTime":"2025-11-25T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.812379 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.812423 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.812434 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.812457 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.812469 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:27Z","lastTransitionTime":"2025-11-25T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.914957 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.915056 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.915084 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.915118 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:27 crc kubenswrapper[4910]: I1125 21:31:27.915150 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:27Z","lastTransitionTime":"2025-11-25T21:31:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.018804 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.018866 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.018889 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.018922 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.018945 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:28Z","lastTransitionTime":"2025-11-25T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.122489 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.122541 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.122553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.122571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.122583 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:28Z","lastTransitionTime":"2025-11-25T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.203420 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.203460 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:28 crc kubenswrapper[4910]: E1125 21:31:28.203588 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.203608 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:28 crc kubenswrapper[4910]: E1125 21:31:28.203679 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:28 crc kubenswrapper[4910]: E1125 21:31:28.203767 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.204207 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:28 crc kubenswrapper[4910]: E1125 21:31:28.204596 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.226198 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.226264 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.226277 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.226296 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.226310 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:28Z","lastTransitionTime":"2025-11-25T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.329403 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.329470 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.329488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.329513 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.329531 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:28Z","lastTransitionTime":"2025-11-25T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.432479 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.432559 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.432581 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.432613 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.432633 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:28Z","lastTransitionTime":"2025-11-25T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.535817 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.535885 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.535902 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.535929 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.535949 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:28Z","lastTransitionTime":"2025-11-25T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.639562 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.639622 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.639635 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.639658 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.639672 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:28Z","lastTransitionTime":"2025-11-25T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.709222 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:28 crc kubenswrapper[4910]: E1125 21:31:28.709505 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:28 crc kubenswrapper[4910]: E1125 21:31:28.709660 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs podName:72d787b6-8fd2-4a83-9e8f-2654fdad81c9 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:36.709623972 +0000 UTC m=+52.172100314 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs") pod "network-metrics-daemon-m4q5p" (UID: "72d787b6-8fd2-4a83-9e8f-2654fdad81c9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.742629 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.742671 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.742682 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.742698 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.742708 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:28Z","lastTransitionTime":"2025-11-25T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.845886 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.845977 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.845997 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.846034 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.846061 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:28Z","lastTransitionTime":"2025-11-25T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.948478 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.948661 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.948683 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.948751 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:28 crc kubenswrapper[4910]: I1125 21:31:28.948769 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:28Z","lastTransitionTime":"2025-11-25T21:31:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.052502 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.052607 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.052626 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.052650 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.052668 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:29Z","lastTransitionTime":"2025-11-25T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.156421 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.156498 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.156520 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.156550 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.156573 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:29Z","lastTransitionTime":"2025-11-25T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.259488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.259615 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.259648 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.259688 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.259716 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:29Z","lastTransitionTime":"2025-11-25T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.362947 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.363018 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.363041 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.363073 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.363095 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:29Z","lastTransitionTime":"2025-11-25T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.466361 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.466442 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.466455 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.466500 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.466525 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:29Z","lastTransitionTime":"2025-11-25T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.569782 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.569837 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.569851 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.569873 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.569884 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:29Z","lastTransitionTime":"2025-11-25T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.673144 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.673227 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.673287 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.673320 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.673346 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:29Z","lastTransitionTime":"2025-11-25T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.776321 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.776362 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.776371 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.776386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.776397 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:29Z","lastTransitionTime":"2025-11-25T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.879420 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.879481 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.879503 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.879531 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.879546 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:29Z","lastTransitionTime":"2025-11-25T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.982869 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.982925 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.982935 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.982955 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:29 crc kubenswrapper[4910]: I1125 21:31:29.982967 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:29Z","lastTransitionTime":"2025-11-25T21:31:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.085937 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.086025 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.086047 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.086079 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.086102 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:30Z","lastTransitionTime":"2025-11-25T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.189216 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.189314 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.189339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.189369 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.189392 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:30Z","lastTransitionTime":"2025-11-25T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.203815 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.203911 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:30 crc kubenswrapper[4910]: E1125 21:31:30.203965 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.204064 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:30 crc kubenswrapper[4910]: E1125 21:31:30.204059 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.204107 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:30 crc kubenswrapper[4910]: E1125 21:31:30.204385 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:30 crc kubenswrapper[4910]: E1125 21:31:30.204453 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.205068 4910 scope.go:117] "RemoveContainer" containerID="b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.292655 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.293234 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.293319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.293398 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.293427 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:30Z","lastTransitionTime":"2025-11-25T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.398170 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.398229 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.398278 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.398338 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.398361 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:30Z","lastTransitionTime":"2025-11-25T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.501975 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.502063 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.502082 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.502118 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.502139 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:30Z","lastTransitionTime":"2025-11-25T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.605552 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.605604 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.605616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.605640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.605652 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:30Z","lastTransitionTime":"2025-11-25T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.621622 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/1.log" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.625524 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111"} Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.625657 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.662147 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c
13fa4ecea76ff3c0d656e111\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:18Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429340 6330 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429448 6330 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429528 6330 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429588 6330 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429648 6330 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429719 6330 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429827 6330 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429996 6330 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\
\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.685540 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.708933 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.708994 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.709008 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.709029 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.709043 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:30Z","lastTransitionTime":"2025-11-25T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.711490 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.737684 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.760545 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.787037 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.807754 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.812113 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.812202 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.812230 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.812319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.812378 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:30Z","lastTransitionTime":"2025-11-25T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.826002 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.840922 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.859704 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.871592 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.888506 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.909032 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.912848 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.914675 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.914706 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.914715 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.914732 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.914742 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:30Z","lastTransitionTime":"2025-11-25T21:31:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.923763 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.940532 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:30 crc kubenswrapper[4910]: I1125 21:31:30.963928 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:30Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.017415 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.017457 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.017465 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.017479 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.017490 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:31Z","lastTransitionTime":"2025-11-25T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.120600 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.120666 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.120686 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.120718 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.120736 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:31Z","lastTransitionTime":"2025-11-25T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.223034 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.223105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.223126 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.223155 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.223176 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:31Z","lastTransitionTime":"2025-11-25T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.326198 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.326259 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.326271 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.326287 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.326297 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:31Z","lastTransitionTime":"2025-11-25T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.429011 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.429066 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.429078 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.429101 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.429114 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:31Z","lastTransitionTime":"2025-11-25T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.534066 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.534150 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.534171 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.534220 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.534317 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:31Z","lastTransitionTime":"2025-11-25T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.633811 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/2.log" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.635116 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/1.log" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.636898 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.636971 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.636994 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.637028 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.637059 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:31Z","lastTransitionTime":"2025-11-25T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.639748 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111" exitCode=1 Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.639810 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111"} Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.639874 4910 scope.go:117] "RemoveContainer" containerID="b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.642744 4910 scope.go:117] "RemoveContainer" containerID="60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111" Nov 25 21:31:31 crc kubenswrapper[4910]: E1125 21:31:31.643322 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.669347 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.693154 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.710098 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.730180 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.740117 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.740180 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.740197 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.740219 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.740233 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:31Z","lastTransitionTime":"2025-11-25T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.749281 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.767685 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.793200 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05587
6a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b3a01dfadfa673e13ee464df6fe42e40894cfce2baeff9e60463787a79fff27a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:18Z\\\",\\\"message\\\":\\\"311] Stopping reflector *v1alpha1.AdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429340 6330 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1125 21:31:18.429448 6330 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429528 6330 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429588 6330 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429648 6330 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429719 6330 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429827 6330 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:18.429996 6330 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from 
github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:31Z\\\",\\\"message\\\":\\\"1.381405 6566 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:31.381501 6566 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.381504 6566 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.382310 6566 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:31.382390 6566 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:31.382417 6566 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 21:31:31.382429 6566 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 21:31:31.382445 6566 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:31.382475 6566 factory.go:656] Stopping watch factory\\\\nI1125 21:31:31.382475 6566 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 21:31:31.382482 6566 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:31.382503 6566 ovnkube.go:599] Stopped ovnkube\\\\nI1125 
21\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d20
99482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.807647 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.12
6.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.824026 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.841067 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.842761 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.842806 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.842816 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.843191 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.843227 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:31Z","lastTransitionTime":"2025-11-25T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.855264 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-conf
ig\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.870029 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.884752 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.902401 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.921589 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.935808 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:31Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.946652 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.946681 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.946691 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.946710 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:31 crc kubenswrapper[4910]: I1125 21:31:31.946722 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:31Z","lastTransitionTime":"2025-11-25T21:31:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.049825 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.049871 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.049880 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.049898 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.049909 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.153182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.153281 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.153302 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.153332 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.153357 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.203428 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.203489 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.203490 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.203637 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:32 crc kubenswrapper[4910]: E1125 21:31:32.203647 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:32 crc kubenswrapper[4910]: E1125 21:31:32.203900 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:32 crc kubenswrapper[4910]: E1125 21:31:32.203974 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:32 crc kubenswrapper[4910]: E1125 21:31:32.203994 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.257066 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.257133 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.257152 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.257178 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.257198 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.360993 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.361075 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.361096 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.361124 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.361147 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.372173 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.372271 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.372296 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.372323 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.372343 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: E1125 21:31:32.392884 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.399183 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.399299 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.399323 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.399355 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.399381 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: E1125 21:31:32.419659 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.426105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.426164 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.426182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.426206 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.426224 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: E1125 21:31:32.446148 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.451922 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.451996 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.452012 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.452035 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.452050 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: E1125 21:31:32.467370 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.471137 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.471217 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.471233 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.471276 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.471292 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: E1125 21:31:32.487350 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[… image list identical to the previous update attempt, elided …],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: E1125 21:31:32.487461 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.494993 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.495050 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.495084 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.495115 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.495128 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.599748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.599838 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.599860 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.599892 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.599993 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.650798 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/2.log" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.675960 4910 scope.go:117] "RemoveContainer" containerID="60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111" Nov 25 21:31:32 crc kubenswrapper[4910]: E1125 21:31:32.676323 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.691670 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.704221 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.704340 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.704360 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.704386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.704406 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.712653 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.729369 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.743601 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.759577 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.775886 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.796034 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.806851 
4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.806899 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.806913 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.806932 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.806949 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.819989 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"m
ountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.842817 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c
13fa4ecea76ff3c0d656e111\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:31Z\\\",\\\"message\\\":\\\"1.381405 6566 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:31.381501 6566 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.381504 6566 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.382310 6566 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:31.382390 6566 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:31.382417 6566 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 21:31:31.382429 6566 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 21:31:31.382445 6566 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:31.382475 6566 factory.go:656] Stopping watch factory\\\\nI1125 21:31:31.382475 6566 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 21:31:31.382482 6566 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:31.382503 6566 ovnkube.go:599] Stopped ovnkube\\\\nI1125 21\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.855608 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.870213 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.891760 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.907441 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.910020 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.910062 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.910074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.910090 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.910101 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:32Z","lastTransitionTime":"2025-11-25T21:31:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.929375 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.944102 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:32 crc kubenswrapper[4910]: I1125 21:31:32.994065 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:32Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.012699 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.012733 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.012741 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.012755 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.012765 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:33Z","lastTransitionTime":"2025-11-25T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.115462 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.115508 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.115521 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.115538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.115550 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:33Z","lastTransitionTime":"2025-11-25T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.217849 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.217890 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.217898 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.217912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.217922 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:33Z","lastTransitionTime":"2025-11-25T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.319847 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.319902 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.319929 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.319944 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.319953 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:33Z","lastTransitionTime":"2025-11-25T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.422140 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.422180 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.422190 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.422206 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.422221 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:33Z","lastTransitionTime":"2025-11-25T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.524536 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.524576 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.524587 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.524604 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.524616 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:33Z","lastTransitionTime":"2025-11-25T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.627304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.627373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.627392 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.627414 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.627432 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:33Z","lastTransitionTime":"2025-11-25T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.729675 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.729799 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.729824 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.729855 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.729879 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:33Z","lastTransitionTime":"2025-11-25T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.833318 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.833346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.833354 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.833367 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.833375 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:33Z","lastTransitionTime":"2025-11-25T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.936298 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.936343 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.936359 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.936380 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:33 crc kubenswrapper[4910]: I1125 21:31:33.936396 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:33Z","lastTransitionTime":"2025-11-25T21:31:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.039017 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.039052 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.039080 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.039093 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.039101 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:34Z","lastTransitionTime":"2025-11-25T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.142543 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.142836 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.142979 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.143106 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.143216 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:34Z","lastTransitionTime":"2025-11-25T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.203853 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.203852 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:34 crc kubenswrapper[4910]: E1125 21:31:34.204210 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.204028 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:34 crc kubenswrapper[4910]: E1125 21:31:34.204742 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:34 crc kubenswrapper[4910]: E1125 21:31:34.204441 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.203944 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:34 crc kubenswrapper[4910]: E1125 21:31:34.205229 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.245546 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.245640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.245660 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.245756 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.245771 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:34Z","lastTransitionTime":"2025-11-25T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.349274 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.349612 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.349840 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.350043 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.350192 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:34Z","lastTransitionTime":"2025-11-25T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.452884 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.452933 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.452949 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.452973 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.452991 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:34Z","lastTransitionTime":"2025-11-25T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.556570 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.556630 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.556648 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.556678 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.556696 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:34Z","lastTransitionTime":"2025-11-25T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.658692 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.658752 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.658768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.658790 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.658809 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:34Z","lastTransitionTime":"2025-11-25T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.761892 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.762014 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.762037 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.762071 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.762095 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:34Z","lastTransitionTime":"2025-11-25T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.864882 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.864921 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.864929 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.864943 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.864954 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:34Z","lastTransitionTime":"2025-11-25T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.967468 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.967512 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.967522 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.967537 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:34 crc kubenswrapper[4910]: I1125 21:31:34.967549 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:34Z","lastTransitionTime":"2025-11-25T21:31:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.070625 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.070683 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.070695 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.070718 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.070732 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:35Z","lastTransitionTime":"2025-11-25T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.173504 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.173552 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.173565 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.173582 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.173593 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:35Z","lastTransitionTime":"2025-11-25T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.219767 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.235520 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.252615 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.276166 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05587
6a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:31Z\\\",\\\"message\\\":\\\"1.381405 6566 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:31.381501 6566 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.381504 6566 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.382310 6566 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:31.382390 6566 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:31.382417 6566 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 21:31:31.382429 6566 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 21:31:31.382445 6566 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:31.382475 6566 factory.go:656] Stopping watch factory\\\\nI1125 21:31:31.382475 6566 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 21:31:31.382482 6566 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:31.382503 6566 ovnkube.go:599] Stopped ovnkube\\\\nI1125 
21\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.276453 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.276493 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.276502 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.276519 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.276541 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:35Z","lastTransitionTime":"2025-11-25T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.287372 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.299370 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.322545 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.340411 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 
21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.352509 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.368842 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.378423 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.378454 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.378467 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.378484 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.378501 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:35Z","lastTransitionTime":"2025-11-25T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.386989 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.400849 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.423612 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.438989 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.453987 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.473903 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:35Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.480628 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.480662 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.480671 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.480687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.480698 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:35Z","lastTransitionTime":"2025-11-25T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.582726 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.582760 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.582769 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.582782 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.582790 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:35Z","lastTransitionTime":"2025-11-25T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.684659 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.684715 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.684731 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.684752 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.684786 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:35Z","lastTransitionTime":"2025-11-25T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.786953 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.786992 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.787000 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.787015 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.787026 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:35Z","lastTransitionTime":"2025-11-25T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.889041 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.889103 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.889119 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.889142 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.889159 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:35Z","lastTransitionTime":"2025-11-25T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.991519 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.991584 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.991605 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.991634 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:35 crc kubenswrapper[4910]: I1125 21:31:35.991656 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:35Z","lastTransitionTime":"2025-11-25T21:31:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.094598 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.094665 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.094688 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.094718 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.094741 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:36Z","lastTransitionTime":"2025-11-25T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.197233 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.197338 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.197354 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.197379 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.197395 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:36Z","lastTransitionTime":"2025-11-25T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.203554 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.203666 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.203674 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.203800 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:36 crc kubenswrapper[4910]: E1125 21:31:36.203795 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:36 crc kubenswrapper[4910]: E1125 21:31:36.203920 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:36 crc kubenswrapper[4910]: E1125 21:31:36.204015 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:36 crc kubenswrapper[4910]: E1125 21:31:36.204091 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.300113 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.300143 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.300154 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.300170 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.300182 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:36Z","lastTransitionTime":"2025-11-25T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.402341 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.402404 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.402420 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.402444 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.402461 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:36Z","lastTransitionTime":"2025-11-25T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.505404 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.505727 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.505867 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.506058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.506333 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:36Z","lastTransitionTime":"2025-11-25T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.609432 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.609487 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.609505 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.609529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.609546 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:36Z","lastTransitionTime":"2025-11-25T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.712757 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.712813 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.712832 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.712856 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.712874 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:36Z","lastTransitionTime":"2025-11-25T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.722736 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:36 crc kubenswrapper[4910]: E1125 21:31:36.722996 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:36 crc kubenswrapper[4910]: E1125 21:31:36.723114 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs podName:72d787b6-8fd2-4a83-9e8f-2654fdad81c9 nodeName:}" failed. No retries permitted until 2025-11-25 21:31:52.723078127 +0000 UTC m=+68.185554489 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs") pod "network-metrics-daemon-m4q5p" (UID: "72d787b6-8fd2-4a83-9e8f-2654fdad81c9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.816114 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.816177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.816194 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.816221 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.816282 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:36Z","lastTransitionTime":"2025-11-25T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.918662 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.919079 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.919321 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.919487 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:36 crc kubenswrapper[4910]: I1125 21:31:36.919625 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:36Z","lastTransitionTime":"2025-11-25T21:31:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.021985 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.022951 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.023156 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.023425 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.023612 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:37Z","lastTransitionTime":"2025-11-25T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.126661 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.126713 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.126730 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.126751 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.126765 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:37Z","lastTransitionTime":"2025-11-25T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.228269 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.228332 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.228347 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.228363 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.228374 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:37Z","lastTransitionTime":"2025-11-25T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.331546 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.331608 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.331622 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.331645 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.331667 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:37Z","lastTransitionTime":"2025-11-25T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.428632 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.428830 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:32:09.428786144 +0000 UTC m=+84.891262556 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.428945 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.429032 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.429102 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.429157 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429109 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429301 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:32:09.429283767 +0000 UTC m=+84.891760099 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429202 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429392 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429404 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429428 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:32:09.42940348 +0000 UTC m=+84.891879822 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429427 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429463 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429551 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 21:32:09.429526893 +0000 UTC m=+84.892003255 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429446 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429611 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:37 crc kubenswrapper[4910]: E1125 21:31:37.429738 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 21:32:09.429710007 +0000 UTC m=+84.892186369 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.437393 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.437463 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.437473 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.437488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.437496 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:37Z","lastTransitionTime":"2025-11-25T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.540031 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.540070 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.540079 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.540110 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.540121 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:37Z","lastTransitionTime":"2025-11-25T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.642591 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.642657 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.642672 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.643051 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.643088 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:37Z","lastTransitionTime":"2025-11-25T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.745494 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.745556 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.745576 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.745601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.745619 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:37Z","lastTransitionTime":"2025-11-25T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.848216 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.848301 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.848319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.848345 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.848364 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:37Z","lastTransitionTime":"2025-11-25T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.951096 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.951149 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.951166 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.951193 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:37 crc kubenswrapper[4910]: I1125 21:31:37.951211 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:37Z","lastTransitionTime":"2025-11-25T21:31:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.055101 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.055164 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.055187 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.055216 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.055237 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:38Z","lastTransitionTime":"2025-11-25T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.157636 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.157764 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.157781 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.157803 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.157821 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:38Z","lastTransitionTime":"2025-11-25T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.203133 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.203181 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.203309 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:38 crc kubenswrapper[4910]: E1125 21:31:38.203300 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:38 crc kubenswrapper[4910]: E1125 21:31:38.203438 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:38 crc kubenswrapper[4910]: E1125 21:31:38.203580 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.203779 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:38 crc kubenswrapper[4910]: E1125 21:31:38.203985 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.261963 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.262039 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.262061 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.262092 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.262114 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:38Z","lastTransitionTime":"2025-11-25T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.364483 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.364527 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.364538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.364554 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.364566 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:38Z","lastTransitionTime":"2025-11-25T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.467719 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.467759 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.467768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.467788 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.467803 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:38Z","lastTransitionTime":"2025-11-25T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.570366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.570412 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.570423 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.570442 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.570457 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:38Z","lastTransitionTime":"2025-11-25T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.672790 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.672847 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.672857 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.672874 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.672885 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:38Z","lastTransitionTime":"2025-11-25T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.774506 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.774554 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.774570 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.774591 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.774606 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:38Z","lastTransitionTime":"2025-11-25T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.876837 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.876879 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.876891 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.876908 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.876920 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:38Z","lastTransitionTime":"2025-11-25T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.979444 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.979560 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.979575 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.979592 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:38 crc kubenswrapper[4910]: I1125 21:31:38.979605 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:38Z","lastTransitionTime":"2025-11-25T21:31:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.082319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.082443 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.082461 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.082480 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.082492 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:39Z","lastTransitionTime":"2025-11-25T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.167448 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.176766 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.184341 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.184418 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.184444 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.184476 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.184500 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:39Z","lastTransitionTime":"2025-11-25T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.187607 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.200716 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.210996 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.223047 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.236854 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.249070 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imag
eID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.261054 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\"
:\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.281959 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c
13fa4ecea76ff3c0d656e111\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:31Z\\\",\\\"message\\\":\\\"1.381405 6566 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:31.381501 6566 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.381504 6566 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.382310 6566 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:31.382390 6566 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:31.382417 6566 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 21:31:31.382429 6566 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 21:31:31.382445 6566 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:31.382475 6566 factory.go:656] Stopping watch factory\\\\nI1125 21:31:31.382475 6566 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 21:31:31.382482 6566 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:31.382503 6566 ovnkube.go:599] Stopped ovnkube\\\\nI1125 21\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.286852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.286892 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.286901 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.286914 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.286926 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:39Z","lastTransitionTime":"2025-11-25T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.292964 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.304059 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.317733 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.328389 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 
21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.341001 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\
":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.352469 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.362418 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.373759 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:39Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.389069 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.389108 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.389119 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.389138 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.389149 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:39Z","lastTransitionTime":"2025-11-25T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.492143 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.492188 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.492199 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.492216 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.492227 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:39Z","lastTransitionTime":"2025-11-25T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.594864 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.594926 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.594938 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.594957 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.594973 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:39Z","lastTransitionTime":"2025-11-25T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.696713 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.696752 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.696762 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.696779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.696790 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:39Z","lastTransitionTime":"2025-11-25T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.799295 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.799325 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.799333 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.799349 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.799359 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:39Z","lastTransitionTime":"2025-11-25T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.901170 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.901201 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.901209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.901223 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:39 crc kubenswrapper[4910]: I1125 21:31:39.901232 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:39Z","lastTransitionTime":"2025-11-25T21:31:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.002931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.002968 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.002976 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.003001 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.003010 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:40Z","lastTransitionTime":"2025-11-25T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.104936 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.104969 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.104976 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.104989 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.104998 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:40Z","lastTransitionTime":"2025-11-25T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.202910 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.202967 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.202937 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.202961 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:40 crc kubenswrapper[4910]: E1125 21:31:40.203063 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:40 crc kubenswrapper[4910]: E1125 21:31:40.203131 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:40 crc kubenswrapper[4910]: E1125 21:31:40.203183 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:40 crc kubenswrapper[4910]: E1125 21:31:40.203227 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.276007 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.276038 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.276046 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.276058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.276068 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:40Z","lastTransitionTime":"2025-11-25T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.378611 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.378651 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.378661 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.378676 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.378685 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:40Z","lastTransitionTime":"2025-11-25T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.480654 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.480687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.480695 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.480709 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.480719 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:40Z","lastTransitionTime":"2025-11-25T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.583100 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.583141 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.583152 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.583170 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.583182 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:40Z","lastTransitionTime":"2025-11-25T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.685520 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.685558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.685567 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.685580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.685589 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:40Z","lastTransitionTime":"2025-11-25T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.787662 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.787737 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.787762 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.787786 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.787802 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:40Z","lastTransitionTime":"2025-11-25T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.820121 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.838079 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.854467 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d4f724-3075-4ed5-9fbf-0be4d8f90ae1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.865165 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.876792 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.888994 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.889583 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.889623 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.889635 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.889655 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.889666 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:40Z","lastTransitionTime":"2025-11-25T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.908636 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c
13fa4ecea76ff3c0d656e111\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:31Z\\\",\\\"message\\\":\\\"1.381405 6566 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:31.381501 6566 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.381504 6566 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.382310 6566 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:31.382390 6566 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:31.382417 6566 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 21:31:31.382429 6566 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 21:31:31.382445 6566 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:31.382475 6566 factory.go:656] Stopping watch factory\\\\nI1125 21:31:31.382475 6566 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 21:31:31.382482 6566 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:31.382503 6566 ovnkube.go:599] Stopped ovnkube\\\\nI1125 21\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.918757 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.930512 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.941996 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.961655 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.973602 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.985488 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"started
At\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 
21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.995950 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.995983 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.995999 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.996013 4910 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeNotReady" Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.996026 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:40Z","lastTransitionTime":"2025-11-25T21:31:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:40 crc kubenswrapper[4910]: I1125 21:31:40.999189 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\
":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:40Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.010772 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:41Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.024282 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:41Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.040580 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:41Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.054464 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:41Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.097952 4910 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.097992 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.098001 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.098016 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.098027 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:41Z","lastTransitionTime":"2025-11-25T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.200777 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.200816 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.200824 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.200839 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.200848 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:41Z","lastTransitionTime":"2025-11-25T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.303440 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.303480 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.303488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.303500 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.303510 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:41Z","lastTransitionTime":"2025-11-25T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.406469 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.406506 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.406514 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.406527 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.406537 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:41Z","lastTransitionTime":"2025-11-25T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.509642 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.509702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.509720 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.509744 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.509761 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:41Z","lastTransitionTime":"2025-11-25T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.612374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.612478 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.612504 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.612531 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.612553 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:41Z","lastTransitionTime":"2025-11-25T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.714810 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.714852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.714862 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.714878 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.714890 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:41Z","lastTransitionTime":"2025-11-25T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.817352 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.817391 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.817404 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.817419 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.817430 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:41Z","lastTransitionTime":"2025-11-25T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.919484 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.919541 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.919551 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.919563 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:41 crc kubenswrapper[4910]: I1125 21:31:41.919571 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:41Z","lastTransitionTime":"2025-11-25T21:31:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.022561 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.022609 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.022620 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.022638 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.022651 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.124772 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.124829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.124840 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.124857 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.124868 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.203395 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:42 crc kubenswrapper[4910]: E1125 21:31:42.203513 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.203714 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p"
Nov 25 21:31:42 crc kubenswrapper[4910]: E1125 21:31:42.203768 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.204034 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 21:31:42 crc kubenswrapper[4910]: E1125 21:31:42.204082 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.204118 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:31:42 crc kubenswrapper[4910]: E1125 21:31:42.204154 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.226349 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.226374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.226381 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.226392 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.226400 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.328755 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.328798 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.328808 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.328823 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.328844 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.430506 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.430548 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.430556 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.430571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.430580 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.532847 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.532888 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.532897 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.532912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.532929 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.635306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.635344 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.635352 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.635366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.635375 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.738616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.738652 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.738677 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.738692 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.738703 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.782326 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.782357 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.782366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.782380 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.782427 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:42 crc kubenswrapper[4910]: E1125 21:31:42.794854 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:42Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.798479 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.798523 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.798534 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.798552 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.798563 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:42 crc kubenswrapper[4910]: E1125 21:31:42.813732 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:42Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.816827 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.816868 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.816901 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.816918 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.816929 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:42 crc kubenswrapper[4910]: E1125 21:31:42.827108 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:42Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.829982 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.830021 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.830035 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.830052 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.830063 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:42 crc kubenswrapper[4910]: E1125 21:31:42.841666 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:42Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.844541 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.844589 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.844601 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.844618 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.844629 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:42 crc kubenswrapper[4910]: E1125 21:31:42.854630 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:42Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:42 crc kubenswrapper[4910]: E1125 21:31:42.854792 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.856484 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.856520 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.856529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.856543 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.856553 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.958821 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.958858 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.958868 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.958883 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:42 crc kubenswrapper[4910]: I1125 21:31:42.958893 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:42Z","lastTransitionTime":"2025-11-25T21:31:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.061454 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.061508 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.061517 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.061532 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.061541 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:43Z","lastTransitionTime":"2025-11-25T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.165960 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.166031 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.166050 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.166074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.166093 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:43Z","lastTransitionTime":"2025-11-25T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.269036 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.269094 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.269111 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.269133 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.269151 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:43Z","lastTransitionTime":"2025-11-25T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.372095 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.372169 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.372193 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.372221 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.372240 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:43Z","lastTransitionTime":"2025-11-25T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.475275 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.475558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.475625 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.475699 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.475769 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:43Z","lastTransitionTime":"2025-11-25T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.578479 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.578537 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.578555 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.578580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.578598 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:43Z","lastTransitionTime":"2025-11-25T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.680788 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.681105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.681186 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.681268 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.681370 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:43Z","lastTransitionTime":"2025-11-25T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.783781 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.783812 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.783821 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.783834 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.783843 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:43Z","lastTransitionTime":"2025-11-25T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.886217 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.886284 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.886296 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.886315 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.886328 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:43Z","lastTransitionTime":"2025-11-25T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.989865 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.989930 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.989954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.989985 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:43 crc kubenswrapper[4910]: I1125 21:31:43.990008 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:43Z","lastTransitionTime":"2025-11-25T21:31:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.094224 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.094306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.094322 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.094350 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.094373 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:44Z","lastTransitionTime":"2025-11-25T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.197079 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.197186 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.197233 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.197325 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.197346 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:44Z","lastTransitionTime":"2025-11-25T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.203586 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.203636 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.203586 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:44 crc kubenswrapper[4910]: E1125 21:31:44.203780 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.203724 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:44 crc kubenswrapper[4910]: E1125 21:31:44.204003 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:44 crc kubenswrapper[4910]: E1125 21:31:44.204094 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:44 crc kubenswrapper[4910]: E1125 21:31:44.204198 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.300580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.300639 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.300659 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.300686 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.300704 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:44Z","lastTransitionTime":"2025-11-25T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.403290 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.403324 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.403333 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.403354 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.403363 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:44Z","lastTransitionTime":"2025-11-25T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.505782 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.505876 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.505895 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.505951 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.505968 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:44Z","lastTransitionTime":"2025-11-25T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.607838 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.607888 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.607903 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.607919 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.607936 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:44Z","lastTransitionTime":"2025-11-25T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.709844 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.709897 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.709906 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.709919 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.709928 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:44Z","lastTransitionTime":"2025-11-25T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.812710 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.812741 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.812749 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.812763 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.812772 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:44Z","lastTransitionTime":"2025-11-25T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.915398 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.915440 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.915453 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.915481 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:44 crc kubenswrapper[4910]: I1125 21:31:44.915497 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:44Z","lastTransitionTime":"2025-11-25T21:31:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.019734 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.019783 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.019800 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.019839 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.019856 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:45Z","lastTransitionTime":"2025-11-25T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.122328 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.122400 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.122426 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.122452 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.122467 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:45Z","lastTransitionTime":"2025-11-25T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.217753 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.224955 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.224998 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.225007 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.225020 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.225029 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:45Z","lastTransitionTime":"2025-11-25T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.227884 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.237205 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.250162 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.261873 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.272323 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.329817 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d4f724-3075-4ed5-9fbf-0be4d8f90ae1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.331595 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.331760 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.332338 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.332611 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.334391 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:45Z","lastTransitionTime":"2025-11-25T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.340739 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.353034 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.370481 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.386684 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://05587
6a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:31Z\\\",\\\"message\\\":\\\"1.381405 6566 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:31.381501 6566 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.381504 6566 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.382310 6566 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:31.382390 6566 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:31.382417 6566 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 21:31:31.382429 6566 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 21:31:31.382445 6566 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:31.382475 6566 factory.go:656] Stopping watch factory\\\\nI1125 21:31:31.382475 6566 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 21:31:31.382482 6566 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:31.382503 6566 ovnkube.go:599] Stopped ovnkube\\\\nI1125 
21\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.397328 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.407997 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.417325 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.426883 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.436973 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.437000 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.437010 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.437024 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.437035 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:45Z","lastTransitionTime":"2025-11-25T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.438782 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.447323 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T21:31:45Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.539912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.539953 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.539964 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.539980 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.539990 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:45Z","lastTransitionTime":"2025-11-25T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.643585 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.643630 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.643640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.643655 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:45 crc kubenswrapper[4910]: I1125 21:31:45.643665 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:45Z","lastTransitionTime":"2025-11-25T21:31:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.053821 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.053857 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.053867 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.053881 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.053891 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:46Z","lastTransitionTime":"2025-11-25T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.157847 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.157880 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.157891 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.157910 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.157920 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:46Z","lastTransitionTime":"2025-11-25T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.203295 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.203364 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.203366 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.203379 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:46 crc kubenswrapper[4910]: E1125 21:31:46.203470 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:46 crc kubenswrapper[4910]: E1125 21:31:46.203682 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:46 crc kubenswrapper[4910]: E1125 21:31:46.203791 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:46 crc kubenswrapper[4910]: E1125 21:31:46.203844 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.260304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.260348 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.260360 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.260378 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:46 crc kubenswrapper[4910]: I1125 21:31:46.260394 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:46Z","lastTransitionTime":"2025-11-25T21:31:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.203742 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:48 crc kubenswrapper[4910]: E1125 21:31:48.203924 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.204444 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:48 crc kubenswrapper[4910]: E1125 21:31:48.204548 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.204636 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:48 crc kubenswrapper[4910]: E1125 21:31:48.204780 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.206320 4910 scope.go:117] "RemoveContainer" containerID="60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111" Nov 25 21:31:48 crc kubenswrapper[4910]: E1125 21:31:48.206612 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.206719 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:48 crc kubenswrapper[4910]: E1125 21:31:48.206811 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.208950 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.209004 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.209026 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.209054 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.209077 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:48Z","lastTransitionTime":"2025-11-25T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.311637 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.311709 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.311725 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.311748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:48 crc kubenswrapper[4910]: I1125 21:31:48.311764 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:48Z","lastTransitionTime":"2025-11-25T21:31:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.641394 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.641421 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.641429 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.641442 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.641452 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:49Z","lastTransitionTime":"2025-11-25T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.743559 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.743603 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.743614 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.743630 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.743640 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:49Z","lastTransitionTime":"2025-11-25T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.847047 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.847142 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.847159 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.847181 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.847197 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:49Z","lastTransitionTime":"2025-11-25T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.950011 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.950055 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.950072 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.950097 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:49 crc kubenswrapper[4910]: I1125 21:31:49.950113 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:49Z","lastTransitionTime":"2025-11-25T21:31:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.052194 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.052272 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.052299 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.052323 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.052340 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:50Z","lastTransitionTime":"2025-11-25T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.154871 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.154902 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.154911 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.154924 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.154935 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:50Z","lastTransitionTime":"2025-11-25T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.203132 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.203148 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.203188 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.203188 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p"
Nov 25 21:31:50 crc kubenswrapper[4910]: E1125 21:31:50.203267 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 21:31:50 crc kubenswrapper[4910]: E1125 21:31:50.203391 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9"
Nov 25 21:31:50 crc kubenswrapper[4910]: E1125 21:31:50.203501 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 21:31:50 crc kubenswrapper[4910]: E1125 21:31:50.203554 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.257500 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.257548 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.257562 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.257580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.257592 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:50Z","lastTransitionTime":"2025-11-25T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.359569 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.359604 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.359615 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.359631 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.359643 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:50Z","lastTransitionTime":"2025-11-25T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.462403 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.462445 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.462454 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.462470 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.462483 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:50Z","lastTransitionTime":"2025-11-25T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.564860 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.564912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.564920 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.564936 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.564945 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:50Z","lastTransitionTime":"2025-11-25T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.666899 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.666967 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.666984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.667001 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.667012 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:50Z","lastTransitionTime":"2025-11-25T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.768954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.769000 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.769010 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.769024 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.769037 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:50Z","lastTransitionTime":"2025-11-25T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.871279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.871320 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.871331 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.871347 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.871361 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:50Z","lastTransitionTime":"2025-11-25T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.974131 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.974193 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.974209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.974233 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:50 crc kubenswrapper[4910]: I1125 21:31:50.974277 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:50Z","lastTransitionTime":"2025-11-25T21:31:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.077177 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.077289 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.077316 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.077346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.077369 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:51Z","lastTransitionTime":"2025-11-25T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.179903 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.179945 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.179955 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.179978 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.179990 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:51Z","lastTransitionTime":"2025-11-25T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.282823 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.282908 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.282931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.282960 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.282982 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:51Z","lastTransitionTime":"2025-11-25T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.384803 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.384827 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.384835 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.384848 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.384856 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:51Z","lastTransitionTime":"2025-11-25T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.486459 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.486502 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.486513 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.486529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.486539 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:51Z","lastTransitionTime":"2025-11-25T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.588484 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.588529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.588538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.588552 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.588565 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:51Z","lastTransitionTime":"2025-11-25T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.691386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.691436 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.691446 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.691460 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.691469 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:51Z","lastTransitionTime":"2025-11-25T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.793324 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.793364 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.793373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.793387 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.793397 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:51Z","lastTransitionTime":"2025-11-25T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.895665 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.895695 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.895707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.895724 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.895735 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:51Z","lastTransitionTime":"2025-11-25T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.998105 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.998140 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.998149 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.998165 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:51 crc kubenswrapper[4910]: I1125 21:31:51.998175 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:51Z","lastTransitionTime":"2025-11-25T21:31:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.100209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.100311 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.100327 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.100354 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.100368 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:52Z","lastTransitionTime":"2025-11-25T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.202876 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.202912 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.202892 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p"
Nov 25 21:31:52 crc kubenswrapper[4910]: E1125 21:31:52.203004 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:52 crc kubenswrapper[4910]: E1125 21:31:52.203282 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:52 crc kubenswrapper[4910]: E1125 21:31:52.203321 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.203654 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:52 crc kubenswrapper[4910]: E1125 21:31:52.203927 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.204428 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.204567 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.204699 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.204834 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.204955 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:52Z","lastTransitionTime":"2025-11-25T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.307321 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.307542 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.307625 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.307738 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.307827 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:52Z","lastTransitionTime":"2025-11-25T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.409864 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.409898 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.409906 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.409921 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.409930 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:52Z","lastTransitionTime":"2025-11-25T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.513171 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.513239 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.513279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.513304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.513317 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:52Z","lastTransitionTime":"2025-11-25T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.615943 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.615984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.615996 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.616010 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.616019 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:52Z","lastTransitionTime":"2025-11-25T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.718825 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.718882 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.718896 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.718921 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.718939 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:52Z","lastTransitionTime":"2025-11-25T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.799633 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:52 crc kubenswrapper[4910]: E1125 21:31:52.799792 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:31:52 crc kubenswrapper[4910]: E1125 21:31:52.799838 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs podName:72d787b6-8fd2-4a83-9e8f-2654fdad81c9 nodeName:}" failed. No retries permitted until 2025-11-25 21:32:24.799823861 +0000 UTC m=+100.262300183 (durationBeforeRetry 32s). 
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.821485 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.821561 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.821571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.821586 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.821594 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:52Z","lastTransitionTime":"2025-11-25T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.923797 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.923842 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.923852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.923868 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:52 crc kubenswrapper[4910]: I1125 21:31:52.923878 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:52Z","lastTransitionTime":"2025-11-25T21:31:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.026042 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.026073 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.026082 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.026096 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.026119 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.078865 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.078904 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.078912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.078928 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.078939 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:31:53 crc kubenswrapper[4910]: E1125 21:31:53.089951 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:53Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.093022 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.093057 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.093069 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.093086 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.093099 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: E1125 21:31:53.107679 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:53Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.111121 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.111225 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.111339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.111430 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.111521 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: E1125 21:31:53.121556 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:53Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.124224 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.124338 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.124396 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.124456 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.124510 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: E1125 21:31:53.134885 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:53Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.138006 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.138094 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.138155 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.138213 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.138298 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: E1125 21:31:53.148770 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:53Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:53 crc kubenswrapper[4910]: E1125 21:31:53.149054 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.150410 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.150441 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.150450 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.150465 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.150474 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.252917 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.252947 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.252958 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.252974 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.252986 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.354976 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.355002 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.355009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.355022 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.355030 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.457332 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.457357 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.457364 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.457377 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.457385 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.559755 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.559787 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.559799 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.559817 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.559828 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.661799 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.661836 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.661848 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.661863 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.661874 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.764150 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.764182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.764190 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.764204 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.764212 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.866190 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.866223 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.866231 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.866261 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.866270 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.968233 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.968486 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.968553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.968626 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:53 crc kubenswrapper[4910]: I1125 21:31:53.968689 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:53Z","lastTransitionTime":"2025-11-25T21:31:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.071229 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.071297 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.071314 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.071337 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.071351 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:54Z","lastTransitionTime":"2025-11-25T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.174077 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.174149 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.174172 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.174209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.174236 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:54Z","lastTransitionTime":"2025-11-25T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.203236 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.203362 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.203668 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:54 crc kubenswrapper[4910]: E1125 21:31:54.203866 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.203912 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:54 crc kubenswrapper[4910]: E1125 21:31:54.203999 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:54 crc kubenswrapper[4910]: E1125 21:31:54.204117 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:54 crc kubenswrapper[4910]: E1125 21:31:54.204195 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.277752 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.277824 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.277836 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.277875 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.277885 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:54Z","lastTransitionTime":"2025-11-25T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.380088 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.380129 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.380138 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.380152 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.380161 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:54Z","lastTransitionTime":"2025-11-25T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.482400 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.482444 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.482456 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.482480 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.482490 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:54Z","lastTransitionTime":"2025-11-25T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.584627 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.584659 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.584668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.584682 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.584691 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:54Z","lastTransitionTime":"2025-11-25T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.686950 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.686990 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.686999 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.687014 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.687024 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:54Z","lastTransitionTime":"2025-11-25T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.739080 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gqjcx_751fe267-dc17-4de7-81e9-a8caab9e9817/kube-multus/0.log" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.739126 4910 generic.go:334] "Generic (PLEG): container finished" podID="751fe267-dc17-4de7-81e9-a8caab9e9817" containerID="7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff" exitCode=1 Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.739161 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gqjcx" event={"ID":"751fe267-dc17-4de7-81e9-a8caab9e9817","Type":"ContainerDied","Data":"7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff"} Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.739609 4910 scope.go:117] "RemoveContainer" containerID="7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.753841 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.764379 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.773221 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.781587 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.789778 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.789816 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.789827 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.789844 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.789855 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:54Z","lastTransitionTime":"2025-11-25T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.792089 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.809055 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:31Z\\\",\\\"message\\\":\\\"1.381405 6566 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:31.381501 6566 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.381504 6566 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.382310 6566 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:31.382390 6566 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:31.382417 6566 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 21:31:31.382429 6566 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 21:31:31.382445 6566 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:31.382475 6566 factory.go:656] Stopping watch factory\\\\nI1125 21:31:31.382475 6566 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 21:31:31.382482 6566 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:31.382503 6566 ovnkube.go:599] Stopped ovnkube\\\\nI1125 21\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.818864 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.829869 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d4f724-3075-4ed5-9fbf-0be4d8f90ae1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440
c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.840188 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.852436 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.862997 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"2025-11-25T21:31:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8\\\\n2025-11-25T21:31:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8 to /host/opt/cni/bin/\\\\n2025-11-25T21:31:09Z [verbose] multus-daemon started\\\\n2025-11-25T21:31:09Z [verbose] Readiness Indicator file check\\\\n2025-11-25T21:31:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.877106 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.889950 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.891588 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.891616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.891625 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.891641 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.891651 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:54Z","lastTransitionTime":"2025-11-25T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.902660 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.914543 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.929349 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.939682 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:54Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.994293 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.994342 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.994351 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.994366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:54 crc kubenswrapper[4910]: I1125 21:31:54.994376 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:54Z","lastTransitionTime":"2025-11-25T21:31:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.096712 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.096755 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.096766 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.096779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.096790 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:55Z","lastTransitionTime":"2025-11-25T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.198840 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.198885 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.198894 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.198909 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.198920 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:55Z","lastTransitionTime":"2025-11-25T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.216641 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.227732 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d4f724-3075-4ed5-9fbf-0be4d8f90ae1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.238486 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 
21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.249812 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.263542 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"2025-11-25T21:31:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8\\\\n2025-11-25T21:31:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8 to /host/opt/cni/bin/\\\\n2025-11-25T21:31:09Z [verbose] multus-daemon started\\\\n2025-11-25T21:31:09Z [verbose] Readiness Indicator file check\\\\n2025-11-25T21:31:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.280007 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:31Z\\\",\\\"message\\\":\\\"1.381405 6566 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:31.381501 6566 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.381504 6566 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.382310 6566 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:31.382390 6566 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:31.382417 6566 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 21:31:31.382429 6566 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 21:31:31.382445 6566 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:31.382475 6566 factory.go:656] Stopping watch factory\\\\nI1125 21:31:31.382475 6566 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 21:31:31.382482 6566 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:31.382503 6566 ovnkube.go:599] Stopped ovnkube\\\\nI1125 21\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.296748 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.301638 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.301704 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.301724 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.301751 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.301771 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:55Z","lastTransitionTime":"2025-11-25T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.325845 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.352425 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.370479 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.379933 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.390582 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"started
At\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 
21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.403738 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.403772 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.403781 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.403795 4910 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeNotReady" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.403804 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:55Z","lastTransitionTime":"2025-11-25T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.404661 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\
":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.413426 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.423826 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.436915 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.448507 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.505749 4910 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.505810 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.505827 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.505850 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.505867 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:55Z","lastTransitionTime":"2025-11-25T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.608937 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.608984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.608994 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.609011 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.609021 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:55Z","lastTransitionTime":"2025-11-25T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.711522 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.711548 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.711556 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.711569 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.711577 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:55Z","lastTransitionTime":"2025-11-25T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.743597 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gqjcx_751fe267-dc17-4de7-81e9-a8caab9e9817/kube-multus/0.log" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.743646 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gqjcx" event={"ID":"751fe267-dc17-4de7-81e9-a8caab9e9817","Type":"ContainerStarted","Data":"1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660"} Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.760234 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.773100 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.787688 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"2025-11-25T21:31:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8\\\\n2025-11-25T21:31:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8 to /host/opt/cni/bin/\\\\n2025-11-25T21:31:09Z [verbose] multus-daemon started\\\\n2025-11-25T21:31:09Z [verbose] Readiness Indicator file check\\\\n2025-11-25T21:31:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.807166 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:31Z\\\",\\\"message\\\":\\\"1.381405 6566 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:31.381501 6566 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.381504 6566 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.382310 6566 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:31.382390 6566 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:31.382417 6566 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 21:31:31.382429 6566 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 21:31:31.382445 6566 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:31.382475 6566 factory.go:656] Stopping watch factory\\\\nI1125 21:31:31.382475 6566 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 21:31:31.382482 6566 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:31.382503 6566 ovnkube.go:599] Stopped ovnkube\\\\nI1125 21\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.813225 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.813297 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.813312 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.813329 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.813340 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:55Z","lastTransitionTime":"2025-11-25T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.817767 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.830473 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d4f724-3075-4ed5-9fbf-0be4d8f90ae1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.841125 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 
21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.858031 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-rele
ase-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.868742 4910 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.882789 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name
\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.895053 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.906403 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.916049 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.916280 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.916422 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.916573 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.916709 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:55Z","lastTransitionTime":"2025-11-25T21:31:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.919611 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.933845 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.946499 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.957311 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:55 crc kubenswrapper[4910]: I1125 21:31:55.967888 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:31:55Z is after 2025-08-24T17:21:41Z" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.018823 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.019028 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.019117 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.019198 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.019298 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:56Z","lastTransitionTime":"2025-11-25T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.121500 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.121545 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.121558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.121576 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.121593 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:56Z","lastTransitionTime":"2025-11-25T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.203348 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.203405 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.203400 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.203426 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:56 crc kubenswrapper[4910]: E1125 21:31:56.203491 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:56 crc kubenswrapper[4910]: E1125 21:31:56.203760 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:56 crc kubenswrapper[4910]: E1125 21:31:56.203841 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:56 crc kubenswrapper[4910]: E1125 21:31:56.203893 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.223341 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.223377 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.223386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.223398 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.223406 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:56Z","lastTransitionTime":"2025-11-25T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.325590 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.325629 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.325639 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.325653 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.325663 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:56Z","lastTransitionTime":"2025-11-25T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.428300 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.428346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.428358 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.428379 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.428393 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:56Z","lastTransitionTime":"2025-11-25T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.531215 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.531266 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.531279 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.531294 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.531305 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:56Z","lastTransitionTime":"2025-11-25T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.634879 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.634917 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.634927 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.634942 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.634983 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:56Z","lastTransitionTime":"2025-11-25T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.737547 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.737600 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.737612 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.737636 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.737653 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:56Z","lastTransitionTime":"2025-11-25T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.839995 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.840220 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.840326 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.840452 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.840540 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:56Z","lastTransitionTime":"2025-11-25T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.943665 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.943710 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.943721 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.943736 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:56 crc kubenswrapper[4910]: I1125 21:31:56.943749 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:56Z","lastTransitionTime":"2025-11-25T21:31:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.046473 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.046506 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.046514 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.046529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.046537 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:57Z","lastTransitionTime":"2025-11-25T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.148686 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.148716 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.148724 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.148748 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.148757 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:57Z","lastTransitionTime":"2025-11-25T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.250644 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.250918 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.251035 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.251151 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.251360 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:57Z","lastTransitionTime":"2025-11-25T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.353903 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.353941 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.353949 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.353963 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.353972 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:57Z","lastTransitionTime":"2025-11-25T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.455937 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.455973 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.455983 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.455998 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.456009 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:57Z","lastTransitionTime":"2025-11-25T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.558770 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.558825 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.558842 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.558863 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.558879 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:57Z","lastTransitionTime":"2025-11-25T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.661157 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.661198 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.661208 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.661223 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.661234 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:57Z","lastTransitionTime":"2025-11-25T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.763509 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.763543 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.763555 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.763572 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.763585 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:57Z","lastTransitionTime":"2025-11-25T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.865637 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.865679 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.865690 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.865707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.865718 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:57Z","lastTransitionTime":"2025-11-25T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.968082 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.968114 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.968123 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.968136 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:57 crc kubenswrapper[4910]: I1125 21:31:57.968146 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:57Z","lastTransitionTime":"2025-11-25T21:31:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.070024 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.070067 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.070075 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.070089 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.070098 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:58Z","lastTransitionTime":"2025-11-25T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.172606 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.172646 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.172654 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.172667 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.172676 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:58Z","lastTransitionTime":"2025-11-25T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.203329 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.203357 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.203407 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:31:58 crc kubenswrapper[4910]: E1125 21:31:58.203480 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:31:58 crc kubenswrapper[4910]: E1125 21:31:58.203555 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:31:58 crc kubenswrapper[4910]: E1125 21:31:58.203615 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.203802 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:31:58 crc kubenswrapper[4910]: E1125 21:31:58.204016 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.275498 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.275566 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.275584 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.275610 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.275628 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:58Z","lastTransitionTime":"2025-11-25T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.377742 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.377776 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.377786 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.377800 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.377810 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:58Z","lastTransitionTime":"2025-11-25T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.480054 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.480099 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.480108 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.480123 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.480132 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:58Z","lastTransitionTime":"2025-11-25T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.582432 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.582465 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.582479 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.582493 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.582503 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:58Z","lastTransitionTime":"2025-11-25T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.687185 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.687355 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.687371 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.687386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.687397 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:58Z","lastTransitionTime":"2025-11-25T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.789220 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.789272 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.789285 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.789301 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.789320 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:58Z","lastTransitionTime":"2025-11-25T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.894117 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.894152 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.894162 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.894179 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.894192 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:58Z","lastTransitionTime":"2025-11-25T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.996660 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.996686 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.996694 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.996707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:58 crc kubenswrapper[4910]: I1125 21:31:58.996716 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:58Z","lastTransitionTime":"2025-11-25T21:31:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.098809 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.098846 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.098861 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.098877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.098887 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:59Z","lastTransitionTime":"2025-11-25T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.201030 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.201061 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.201070 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.201083 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.201091 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:59Z","lastTransitionTime":"2025-11-25T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.305358 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.305407 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.305420 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.305437 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.305453 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:59Z","lastTransitionTime":"2025-11-25T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.408274 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.408311 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.408323 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.408338 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.408348 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:59Z","lastTransitionTime":"2025-11-25T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.510697 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.510734 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.510743 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.510757 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.510766 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:59Z","lastTransitionTime":"2025-11-25T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.612438 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.612478 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.612489 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.612504 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.612514 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:59Z","lastTransitionTime":"2025-11-25T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.714808 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.714864 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.714873 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.714886 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.714895 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:59Z","lastTransitionTime":"2025-11-25T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.817340 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.817445 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.817463 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.817487 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.817506 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:59Z","lastTransitionTime":"2025-11-25T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.920156 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.920188 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.920196 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.920209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:31:59 crc kubenswrapper[4910]: I1125 21:31:59.920217 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:31:59Z","lastTransitionTime":"2025-11-25T21:31:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.022801 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.022900 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.022928 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.022960 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.022983 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:00Z","lastTransitionTime":"2025-11-25T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.125587 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.125643 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.125660 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.125682 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.125698 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:00Z","lastTransitionTime":"2025-11-25T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.203632 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.203658 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.203664 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.203888 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:00 crc kubenswrapper[4910]: E1125 21:32:00.204000 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:00 crc kubenswrapper[4910]: E1125 21:32:00.204042 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:00 crc kubenswrapper[4910]: E1125 21:32:00.204095 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:00 crc kubenswrapper[4910]: E1125 21:32:00.204154 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.228580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.228626 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.228639 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.228654 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.228665 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:00Z","lastTransitionTime":"2025-11-25T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.331661 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.331692 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.331700 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.331714 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.331724 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:00Z","lastTransitionTime":"2025-11-25T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.434761 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.434845 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.434871 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.434899 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.434920 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:00Z","lastTransitionTime":"2025-11-25T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.537792 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.537829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.537841 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.537858 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.537870 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:00Z","lastTransitionTime":"2025-11-25T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.641310 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.641366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.641383 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.641406 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.641426 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:00Z","lastTransitionTime":"2025-11-25T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.743717 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.743768 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.743779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.743793 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.743802 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:00Z","lastTransitionTime":"2025-11-25T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.846488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.846527 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.846536 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.846553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.846562 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:00Z","lastTransitionTime":"2025-11-25T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.949171 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.949207 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.949218 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.949237 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:00 crc kubenswrapper[4910]: I1125 21:32:00.949270 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:00Z","lastTransitionTime":"2025-11-25T21:32:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.051427 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.051467 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.051478 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.051493 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.051506 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:01Z","lastTransitionTime":"2025-11-25T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.153544 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.153595 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.153612 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.153632 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.153649 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:01Z","lastTransitionTime":"2025-11-25T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.258990 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.259037 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.259047 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.259066 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.259077 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:01Z","lastTransitionTime":"2025-11-25T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.361559 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.361625 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.361643 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.361664 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.361683 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:01Z","lastTransitionTime":"2025-11-25T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.464516 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.464591 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.464616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.464647 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.464669 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:01Z","lastTransitionTime":"2025-11-25T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.566711 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.566749 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.566760 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.566775 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.566786 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:01Z","lastTransitionTime":"2025-11-25T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.669868 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.669913 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.669929 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.669947 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.669957 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:01Z","lastTransitionTime":"2025-11-25T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.771774 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.771818 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.771836 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.771855 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.771870 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:01Z","lastTransitionTime":"2025-11-25T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.874868 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.874914 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.874931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.874949 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.874958 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:01Z","lastTransitionTime":"2025-11-25T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.976495 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.976530 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.976539 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.976563 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:01 crc kubenswrapper[4910]: I1125 21:32:01.976571 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:01Z","lastTransitionTime":"2025-11-25T21:32:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.101191 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.101226 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.101234 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.101270 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.101283 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:02Z","lastTransitionTime":"2025-11-25T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.202987 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.203000 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:02 crc kubenswrapper[4910]: E1125 21:32:02.203119 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.203160 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.203220 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:02 crc kubenswrapper[4910]: E1125 21:32:02.203681 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:02 crc kubenswrapper[4910]: E1125 21:32:02.203743 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:02 crc kubenswrapper[4910]: E1125 21:32:02.203804 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.203877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.203893 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.203901 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.203912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.203922 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:02Z","lastTransitionTime":"2025-11-25T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.204273 4910 scope.go:117] "RemoveContainer" containerID="60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.306764 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.306830 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.306852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.306883 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.306904 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:02Z","lastTransitionTime":"2025-11-25T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.409503 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.409546 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.409557 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.409573 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.409583 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:02Z","lastTransitionTime":"2025-11-25T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.511725 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.512036 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.512046 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.512059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.512068 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:02Z","lastTransitionTime":"2025-11-25T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.614370 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.614414 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.614425 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.614460 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.614471 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:02Z","lastTransitionTime":"2025-11-25T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.716504 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.716548 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.716556 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.716571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.716581 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:02Z","lastTransitionTime":"2025-11-25T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.764852 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/2.log" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.767106 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e"} Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.767580 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.781868 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.792396 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d4f724-3075-4ed5-9fbf-0be4d8f90ae1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.803115 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.814531 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.818443 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.818487 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.818499 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.818516 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.818527 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:02Z","lastTransitionTime":"2025-11-25T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.827777 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"2025-11-25T21:31:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8\\\\n2025-11-25T21:31:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8 to /host/opt/cni/bin/\\\\n2025-11-25T21:31:09Z [verbose] multus-daemon started\\\\n2025-11-25T21:31:09Z [verbose] Readiness Indicator file check\\\\n2025-11-25T21:31:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.844614 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:31Z\\\",\\\"message\\\":\\\"1.381405 6566 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:31.381501 6566 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.381504 6566 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.382310 6566 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:31.382390 6566 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:31.382417 6566 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 21:31:31.382429 6566 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 21:31:31.382445 6566 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:31.382475 6566 factory.go:656] Stopping watch factory\\\\nI1125 21:31:31.382475 6566 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 21:31:31.382482 6566 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:31.382503 6566 ovnkube.go:599] Stopped ovnkube\\\\nI1125 
21\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:32:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"con
tainerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.855291 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.867906 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.879950 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.891553 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.904590 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.919672 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.920382 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.920416 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.920425 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.920439 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.920447 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:02Z","lastTransitionTime":"2025-11-25T21:32:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.930404 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.941115 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.952123 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.962470 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:02 crc kubenswrapper[4910]: I1125 21:32:02.972103 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.022450 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.022499 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.022510 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.022526 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.022536 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.125040 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.125081 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.125091 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.125104 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.125112 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.227291 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.227327 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.227335 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.227350 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.227360 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.329835 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.329898 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.329914 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.329931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.329941 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.337070 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.337115 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.337126 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.337144 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.337156 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: E1125 21:32:03.349203 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.352445 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.352477 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.352488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.352503 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.352513 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: E1125 21:32:03.365384 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.369028 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.369065 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.369074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.369089 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.369099 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: E1125 21:32:03.385075 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.389237 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.389290 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.389299 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.389315 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.389327 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: E1125 21:32:03.406653 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.410391 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.410431 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.410441 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.410456 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.410470 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: E1125 21:32:03.421193 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:03Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: E1125 21:32:03.421322 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.431840 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.431903 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.431927 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.431955 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.431975 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.534019 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.534051 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.534058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.534071 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.534079 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.636360 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.636423 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.636441 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.636467 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.636482 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.738815 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.738864 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.738881 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.738904 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.738918 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.771695 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/3.log" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.772460 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/2.log" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.776532 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" exitCode=1 Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.776573 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e"} Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.776624 4910 scope.go:117] "RemoveContainer" containerID="60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.777649 4910 scope.go:117] "RemoveContainer" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" Nov 25 21:32:03 crc kubenswrapper[4910]: E1125 21:32:03.777947 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.797503 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.813152 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.825637 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 
21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.841192 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.842537 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.842612 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.842639 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.842670 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.842692 4910 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.859942 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.873932 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.887370 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.911788 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.933628 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.946152 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.948516 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.948599 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.948625 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.948656 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.948679 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:03Z","lastTransitionTime":"2025-11-25T21:32:03Z","reason":"KubeletNotReady","message":"container runtime network not 
ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.961677 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.975980 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:03 crc kubenswrapper[4910]: I1125 21:32:03.991876 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:03Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.004366 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"2025-11-25T21:31:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8\\\\n2025-11-25T21:31:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8 to /host/opt/cni/bin/\\\\n2025-11-25T21:31:09Z [verbose] multus-daemon started\\\\n2025-11-25T21:31:09Z [verbose] Readiness Indicator file check\\\\n2025-11-25T21:31:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.025075 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://60d95ccd1083f8f8a69e5bfffe1d36e4bfb3240c13fa4ecea76ff3c0d656e111\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:31Z\\\",\\\"message\\\":\\\"1.381405 6566 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1125 21:31:31.381501 6566 reflector.go:311] Stopping reflector *v1.AdminPolicyBasedExternalRoute (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/adminpolicybasedroute/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.381504 6566 reflector.go:311] Stopping reflector *v1.EgressIP (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressip/v1/apis/informers/externalversions/factory.go:140\\\\nI1125 21:31:31.382310 6566 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1125 21:31:31.382390 6566 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1125 21:31:31.382417 6566 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1125 21:31:31.382429 6566 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1125 21:31:31.382445 6566 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1125 21:31:31.382475 6566 factory.go:656] Stopping watch factory\\\\nI1125 21:31:31.382475 6566 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1125 21:31:31.382482 6566 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1125 21:31:31.382503 6566 ovnkube.go:599] Stopped ovnkube\\\\nI1125 21\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:30Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:32:02Z\\\",\\\"message\\\":\\\"365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 21:32:02.984384 6947 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-lpz8j after 0 failed attempt(s)\\\\nI1125 21:32:02.984388 6947 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1125 21:32:02.984389 6947 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-lpz8j\\\\nF1125 21:32:02.984392 6947 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: 
failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z]\\\\nI1125 21:32\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:32:02Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/service
account\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.036647 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.049585 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d4f724-3075-4ed5-9fbf-0be4d8f90ae1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.051671 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.051719 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.051733 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.051754 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.051767 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:04Z","lastTransitionTime":"2025-11-25T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.155873 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.155928 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.155943 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.155962 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.155976 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:04Z","lastTransitionTime":"2025-11-25T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.203105 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.203145 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:04 crc kubenswrapper[4910]: E1125 21:32:04.203206 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.203109 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.203294 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:04 crc kubenswrapper[4910]: E1125 21:32:04.203431 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:04 crc kubenswrapper[4910]: E1125 21:32:04.203510 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:04 crc kubenswrapper[4910]: E1125 21:32:04.203601 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.258652 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.258703 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.258721 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.258746 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.258762 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:04Z","lastTransitionTime":"2025-11-25T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.361550 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.361598 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.361613 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.361633 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.361646 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:04Z","lastTransitionTime":"2025-11-25T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.464709 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.464743 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.464751 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.464763 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.464773 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:04Z","lastTransitionTime":"2025-11-25T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.567391 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.567435 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.567447 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.567464 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.567476 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:04Z","lastTransitionTime":"2025-11-25T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.670225 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.670287 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.670300 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.670319 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.670331 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:04Z","lastTransitionTime":"2025-11-25T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.772723 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.772783 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.772791 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.772803 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.772812 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:04Z","lastTransitionTime":"2025-11-25T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.781223 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/3.log" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.785127 4910 scope.go:117] "RemoveContainer" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" Nov 25 21:32:04 crc kubenswrapper[4910]: E1125 21:32:04.785274 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.801157 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d4f724-3075-4ed5-9fbf-0be4d8f90ae1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.813806 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 
21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.832914 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.848871 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"2025-11-25T21:31:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8\\\\n2025-11-25T21:31:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8 to /host/opt/cni/bin/\\\\n2025-11-25T21:31:09Z [verbose] multus-daemon started\\\\n2025-11-25T21:31:09Z [verbose] Readiness Indicator file check\\\\n2025-11-25T21:31:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.867384 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:32:02Z\\\",\\\"message\\\":\\\"365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 21:32:02.984384 6947 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-lpz8j after 0 failed attempt(s)\\\\nI1125 21:32:02.984388 6947 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1125 21:32:02.984389 6947 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-lpz8j\\\\nF1125 21:32:02.984392 6947 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z]\\\\nI1125 21:32\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:32:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.874615 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.874646 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.874655 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.874668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.874677 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:04Z","lastTransitionTime":"2025-11-25T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.879515 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.890690 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.901854 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.914433 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.925409 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.938441 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"started
At\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 
21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.950681 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.960542 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.969502 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.977188 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.977224 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.977233 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.977285 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.977297 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:04Z","lastTransitionTime":"2025-11-25T21:32:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.982283 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:04 crc kubenswrapper[4910]: I1125 21:32:04.996266 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:04Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.008207 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z"
Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.079342 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.079670 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.079787 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.079972 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.080335 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:05Z","lastTransitionTime":"2025-11-25T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.182351 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.182388 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.182397 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.182411 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.182421 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:05Z","lastTransitionTime":"2025-11-25T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.217089 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.228970 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.239386 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.249743 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.264524 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"start
ed\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},
{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"
system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.274282 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\
"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.284898 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.285097 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.285125 4910 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.285133 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.285147 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.285156 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:05Z","lastTransitionTime":"2025-11-25T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.294801 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.302760 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.311078 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.320154 4910 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.337446 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4db813d54d9336e038bf8a457c42c516
cd3fbbd2e33855ee57d1ed22b847051e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:32:02Z\\\",\\\"message\\\":\\\"365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 21:32:02.984384 6947 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-lpz8j after 0 failed attempt(s)\\\\nI1125 21:32:02.984388 6947 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1125 21:32:02.984389 6947 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-lpz8j\\\\nF1125 21:32:02.984392 6947 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z]\\\\nI1125 21:32\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:32:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.347380 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.357522 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d4f724-3075-4ed5-9fbf-0be4d8f90ae1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440
c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.368423 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.379553 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.387605 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.387655 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.387667 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.387687 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.387701 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:05Z","lastTransitionTime":"2025-11-25T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.390062 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"2025-11-25T21:31:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8\\\\n2025-11-25T21:31:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8 to /host/opt/cni/bin/\\\\n2025-11-25T21:31:09Z [verbose] multus-daemon started\\\\n2025-11-25T21:31:09Z [verbose] Readiness Indicator file check\\\\n2025-11-25T21:31:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:05Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.489831 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.489867 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.489876 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.489892 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.489903 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:05Z","lastTransitionTime":"2025-11-25T21:32:05Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.591743 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.591778 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.591787 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.591807 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.591818 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:05Z","lastTransitionTime":"2025-11-25T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.694141 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.694179 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.694189 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.694205 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.694217 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:05Z","lastTransitionTime":"2025-11-25T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.796185 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.796217 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.796225 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.796238 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.796264 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:05Z","lastTransitionTime":"2025-11-25T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.900068 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.900148 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.900166 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.900188 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:05 crc kubenswrapper[4910]: I1125 21:32:05.900206 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:05Z","lastTransitionTime":"2025-11-25T21:32:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.003003 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.003053 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.003066 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.003083 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.003096 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:06Z","lastTransitionTime":"2025-11-25T21:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.105682 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.105726 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.105734 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.105751 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.105760 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:06Z","lastTransitionTime":"2025-11-25T21:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.203855 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.203879 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.203985 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.204022 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:06 crc kubenswrapper[4910]: E1125 21:32:06.204118 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:06 crc kubenswrapper[4910]: E1125 21:32:06.204184 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:06 crc kubenswrapper[4910]: E1125 21:32:06.204285 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:06 crc kubenswrapper[4910]: E1125 21:32:06.204353 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.207894 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.207929 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.207937 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.207950 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.207960 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:06Z","lastTransitionTime":"2025-11-25T21:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.310209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.310282 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.310297 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.310313 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:06 crc kubenswrapper[4910]: I1125 21:32:06.310323 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:06Z","lastTransitionTime":"2025-11-25T21:32:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 25 21:32:08 crc kubenswrapper[4910]: I1125 21:32:08.203171 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:32:08 crc kubenswrapper[4910]: I1125 21:32:08.203234 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 21:32:08 crc kubenswrapper[4910]: I1125 21:32:08.203207 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:32:08 crc kubenswrapper[4910]: I1125 21:32:08.203207 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p"
Nov 25 21:32:08 crc kubenswrapper[4910]: E1125 21:32:08.203442 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 21:32:08 crc kubenswrapper[4910]: E1125 21:32:08.203581 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 21:32:08 crc kubenswrapper[4910]: E1125 21:32:08.203727 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9"
Nov 25 21:32:08 crc kubenswrapper[4910]: E1125 21:32:08.203939 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 21:32:09 crc kubenswrapper[4910]: I1125 21:32:09.494003 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:32:09 crc kubenswrapper[4910]: I1125 21:32:09.494143 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 21:32:09 crc kubenswrapper[4910]: I1125 21:32:09.494187 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494220 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.494181469 +0000 UTC m=+148.956657831 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:32:09 crc kubenswrapper[4910]: I1125 21:32:09.494383 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494426 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494443 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Nov 25 21:32:09 crc kubenswrapper[4910]: I1125 21:32:09.494477 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494488 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494574 4910 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494580 4910 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494459 4910 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494638 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.49462254 +0000 UTC m=+148.957098902 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494655 4910 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494688 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.494664831 +0000 UTC m=+148.957141193 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494527 4910 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494715 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.494702972 +0000 UTC m=+148.957179334 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Nov 25 21:32:09 crc kubenswrapper[4910]: E1125 21:32:09.494740 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.494729872 +0000 UTC m=+148.957206224 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Nov 25 21:32:10 crc kubenswrapper[4910]: I1125 21:32:10.203194 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:32:10 crc kubenswrapper[4910]: I1125 21:32:10.203220 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 21:32:10 crc kubenswrapper[4910]: E1125 21:32:10.203308 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 21:32:10 crc kubenswrapper[4910]: I1125 21:32:10.203380 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p"
Nov 25 21:32:10 crc kubenswrapper[4910]: I1125 21:32:10.203589 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:32:10 crc kubenswrapper[4910]: E1125 21:32:10.203640 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9"
Nov 25 21:32:10 crc kubenswrapper[4910]: E1125 21:32:10.203758 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 21:32:10 crc kubenswrapper[4910]: E1125 21:32:10.203811 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Has your network provider started?"} Nov 25 21:32:10 crc kubenswrapper[4910]: I1125 21:32:10.967007 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:10 crc kubenswrapper[4910]: I1125 21:32:10.967045 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:10 crc kubenswrapper[4910]: I1125 21:32:10.967057 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:10 crc kubenswrapper[4910]: I1125 21:32:10.967073 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:10 crc kubenswrapper[4910]: I1125 21:32:10.967085 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:10Z","lastTransitionTime":"2025-11-25T21:32:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.069289 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.069338 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.069350 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.069368 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.069378 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:11Z","lastTransitionTime":"2025-11-25T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.172074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.172139 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.172155 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.172185 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.172210 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:11Z","lastTransitionTime":"2025-11-25T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.274286 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.274373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.274395 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.274428 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.274453 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:11Z","lastTransitionTime":"2025-11-25T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.377061 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.377125 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.377143 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.377165 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.377181 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:11Z","lastTransitionTime":"2025-11-25T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.480736 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.480819 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.480835 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.480860 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.480876 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:11Z","lastTransitionTime":"2025-11-25T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.583373 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.583482 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.583506 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.583539 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.583559 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:11Z","lastTransitionTime":"2025-11-25T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.686795 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.686845 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.686858 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.686876 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.686888 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:11Z","lastTransitionTime":"2025-11-25T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.789217 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.789304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.789322 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.789346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.789365 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:11Z","lastTransitionTime":"2025-11-25T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.892485 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.892559 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.892577 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.892602 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.892621 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:11Z","lastTransitionTime":"2025-11-25T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.994887 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.994939 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.994954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.994974 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:11 crc kubenswrapper[4910]: I1125 21:32:11.994988 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:11Z","lastTransitionTime":"2025-11-25T21:32:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.097951 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.098002 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.098020 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.098042 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.098058 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:12Z","lastTransitionTime":"2025-11-25T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.201362 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.201441 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.201464 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.201498 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.201519 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:12Z","lastTransitionTime":"2025-11-25T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.203892 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.203963 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.203899 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.203894 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:12 crc kubenswrapper[4910]: E1125 21:32:12.204141 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:12 crc kubenswrapper[4910]: E1125 21:32:12.204532 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:12 crc kubenswrapper[4910]: E1125 21:32:12.204799 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:12 crc kubenswrapper[4910]: E1125 21:32:12.204855 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.303746 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.303819 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.303832 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.303848 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.303862 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:12Z","lastTransitionTime":"2025-11-25T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.406669 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.406712 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.406720 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.406736 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.406746 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:12Z","lastTransitionTime":"2025-11-25T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.508866 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.508916 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.508929 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.508948 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.508961 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:12Z","lastTransitionTime":"2025-11-25T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.611628 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.611678 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.611693 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.611713 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.611728 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:12Z","lastTransitionTime":"2025-11-25T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.714418 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.714470 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.714487 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.714509 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.714526 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:12Z","lastTransitionTime":"2025-11-25T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.816436 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.816478 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.816491 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.816507 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.816518 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:12Z","lastTransitionTime":"2025-11-25T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.919674 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.919745 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.919762 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.919787 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:12 crc kubenswrapper[4910]: I1125 21:32:12.919805 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:12Z","lastTransitionTime":"2025-11-25T21:32:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.021850 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.021893 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.021909 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.021928 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.021941 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.125224 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.125293 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.125306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.125322 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.125333 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.228855 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.228925 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.228936 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.228956 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.228968 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.331559 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.331603 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.331616 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.331637 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.331649 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.434948 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.435065 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.435089 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.435120 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.435142 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.500778 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.500852 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.500873 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.500902 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.500924 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: E1125 21:32:13.523126 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.528149 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.528231 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.528300 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.528335 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.528359 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: E1125 21:32:13.545782 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.550756 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.550815 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.550829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.550849 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.550863 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: E1125 21:32:13.567391 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.571760 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.571811 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
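Every status-patch failure above has the same root cause: the serving certificate of the node.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-25. The following is a minimal diagnostic sketch, not kubelet code: it dials the webhook address 127.0.0.1:9743 taken from the failed Post in the log and prints the certificate's validity window, mirroring the x509 check behind "certificate has expired or is not yet valid".

```go
// Hypothetical diagnostic, assuming only the endpoint from the log above.
package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true, // we only read the certificate, we do not trust it
	})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	// Inspect the leaf certificate the server presented and compare its
	// validity window against the local clock, as the TLS handshake does.
	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now()
	fmt.Printf("NotBefore=%s NotAfter=%s now=%s expired=%t\n",
		cert.NotBefore.Format(time.RFC3339),
		cert.NotAfter.Format(time.RFC3339),
		now.Format(time.RFC3339),
		now.After(cert.NotAfter))
}
```

The same window can be read with openssl s_client plus openssl x509 -noout -dates against the endpoint; either way, until that certificate is rotated the webhook rejects every node-status patch.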
event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.571823 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.571842 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.571856 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: E1125 21:32:13.584541 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.589750 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.589805 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
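Even with a valid webhook certificate the node would stay NotReady, because the Ready condition above is driven by a second, independent failure: no CNI configuration under /etc/kubernetes/cni/net.d/. A hedged sketch of that check follows; the path comes from the log message, while the accepted file extensions are an assumption based on common CNI loader behavior, not kubelet source.

```go
// Hypothetical readiness probe: does the CNI conf directory contain any config?
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	const confDir = "/etc/kubernetes/cni/net.d" // path from the log message above

	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("cannot read", confDir+":", err)
		return
	}

	found := 0
	for _, e := range entries {
		// Extensions are assumed; CNI loaders conventionally accept these.
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("CNI config found:", e.Name())
			found++
		}
	}
	if found == 0 {
		// This is the state the kubelet keeps reporting: NetworkReady=false.
		fmt.Println("no CNI configuration files; network plugin not ready")
	}
}
```

An empty directory here usually just means the network operator's pods have not started yet, which matches the "Has your network provider started?" hint in the message.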
event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.589822 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.589846 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.589863 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: E1125 21:32:13.608487 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:13Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:13Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:13 crc kubenswrapper[4910]: E1125 21:32:13.608888 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.610660 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
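The entry above closes the cycle: after a fixed number of "will retry" patch attempts, the kubelet gives up with "update node status exceeds retry count" and waits for the next sync period. A sketch of that bounded-retry shape follows, assuming the upstream kubelet's nodeStatusUpdateRetry limit of 5; the helper names and error text are illustrative, not the real implementation.

```go
// Illustrative only: the retry shape implied by the log, not kubelet code.
package main

import (
	"errors"
	"fmt"
)

// Assumed limit; upstream kubelet's nodeStatusUpdateRetry constant is 5.
const nodeStatusUpdateRetry = 5

// patchNodeStatus stands in for the API patch the webhook keeps rejecting.
func patchNodeStatus() error {
	return errors.New("failed calling webhook: certificate has expired")
}

// updateNodeStatus mirrors the log: a few "will retry" lines, then give up
// until the next node-status sync period.
func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := patchNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}
```

Because the failure is deterministic here (an expired certificate), every sync period replays the same rejections, which is why this pattern repeats throughout the log.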
event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.610772 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.610850 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.610931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.610997 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.714440 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.714758 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.714865 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.714976 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.715074 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.817347 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.817680 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.817779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.817885 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.817986 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.921055 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.921120 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.921142 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.921180 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:13 crc kubenswrapper[4910]: I1125 21:32:13.921204 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:13Z","lastTransitionTime":"2025-11-25T21:32:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.024003 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.024099 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.024122 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.024156 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.024180 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:14Z","lastTransitionTime":"2025-11-25T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.126889 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.126930 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.126965 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.126979 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.126988 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:14Z","lastTransitionTime":"2025-11-25T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.203060 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.203096 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.203184 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.203230 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:14 crc kubenswrapper[4910]: E1125 21:32:14.203315 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:14 crc kubenswrapper[4910]: E1125 21:32:14.203422 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:14 crc kubenswrapper[4910]: E1125 21:32:14.203697 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:14 crc kubenswrapper[4910]: E1125 21:32:14.203828 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.213729 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.229354 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.229394 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.229403 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.229422 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.229433 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:14Z","lastTransitionTime":"2025-11-25T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.331811 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.331900 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.331927 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.331968 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.331996 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:14Z","lastTransitionTime":"2025-11-25T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.434907 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.434963 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.434976 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.435000 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.435013 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:14Z","lastTransitionTime":"2025-11-25T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.538849 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.538933 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.538959 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.538994 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.539019 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:14Z","lastTransitionTime":"2025-11-25T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.642058 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.642103 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.642114 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.642134 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.642146 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:14Z","lastTransitionTime":"2025-11-25T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.745431 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.745553 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.745580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.745611 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.745637 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:14Z","lastTransitionTime":"2025-11-25T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.848706 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.848752 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.848764 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.848782 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.848796 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:14Z","lastTransitionTime":"2025-11-25T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.951447 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.951524 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.951543 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.951579 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:14 crc kubenswrapper[4910]: I1125 21:32:14.951600 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:14Z","lastTransitionTime":"2025-11-25T21:32:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.054584 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.054663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.054685 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.054719 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.054738 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:15Z","lastTransitionTime":"2025-11-25T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.157909 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.157987 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.158009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.158034 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.158051 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:15Z","lastTransitionTime":"2025-11-25T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.221634 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.239448 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.260869 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.260914 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.260937 4910 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.260957 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.260968 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:15Z","lastTransitionTime":"2025-11-25T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.262956 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"2025-11-25T21:31:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8\\\\n2025-11-25T21:31:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8 to /host/opt/cni/bin/\\\\n2025-11-25T21:31:09Z [verbose] multus-daemon started\\\\n2025-11-25T21:31:09Z [verbose] Readiness Indicator file check\\\\n2025-11-25T21:31:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.292689 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:32:02Z\\\",\\\"message\\\":\\\"365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 21:32:02.984384 6947 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-lpz8j after 0 failed attempt(s)\\\\nI1125 21:32:02.984388 6947 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1125 21:32:02.984389 6947 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-lpz8j\\\\nF1125 21:32:02.984392 6947 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z]\\\\nI1125 21:32\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:32:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.308611 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.323764 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9cb75650-a608-44a0-8172-2befbef5cf39\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1856abcdc2d9828f760e18deb42cc996ab372e3f7ca2f560f4df9b02ac1dbb71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42dcae1e422c140f201d055b514920c37ef165bc83ace379b17311278469d953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42dcae1e422c140f201d055b514920c37ef165bc83ace379b17311278469d953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Runni
ng\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.343129 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d4f724-3075-4ed5-9fbf-0be4d8f90ae1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.379358 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.379421 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.379440 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.379462 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.379490 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:15Z","lastTransitionTime":"2025-11-25T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.382532 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.402109 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.417789 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 
21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.435101 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.454501 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.470522 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.481983 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.482289 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.482396 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.482498 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.482606 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:15Z","lastTransitionTime":"2025-11-25T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.491499 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.508932 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.524064 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.540388 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.557180 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:15Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.585704 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.585774 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.585793 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.585823 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.585844 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:15Z","lastTransitionTime":"2025-11-25T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.688798 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.688861 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.688877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.688902 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.688919 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:15Z","lastTransitionTime":"2025-11-25T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.792883 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.792939 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.792955 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.792976 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.792990 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:15Z","lastTransitionTime":"2025-11-25T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.896209 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.896317 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.896339 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.896366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:15 crc kubenswrapper[4910]: I1125 21:32:15.896385 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:15Z","lastTransitionTime":"2025-11-25T21:32:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.000480 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.000558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.000576 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.000608 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.000627 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:16Z","lastTransitionTime":"2025-11-25T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.104488 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.104547 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.104564 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.104591 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.104609 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:16Z","lastTransitionTime":"2025-11-25T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.203884 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.203962 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.203895 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.203886 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:16 crc kubenswrapper[4910]: E1125 21:32:16.204118 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:16 crc kubenswrapper[4910]: E1125 21:32:16.204316 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:16 crc kubenswrapper[4910]: E1125 21:32:16.204594 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:16 crc kubenswrapper[4910]: E1125 21:32:16.204737 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.207659 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.207746 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.207783 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.207818 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.207844 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:16Z","lastTransitionTime":"2025-11-25T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.311820 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.311897 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.311916 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.311947 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.311968 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:16Z","lastTransitionTime":"2025-11-25T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.415424 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.415512 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.415543 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.415580 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.415604 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:16Z","lastTransitionTime":"2025-11-25T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.519159 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.519285 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.519307 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.519340 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.519362 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:16Z","lastTransitionTime":"2025-11-25T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.623315 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.623374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.623390 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.623417 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.623436 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:16Z","lastTransitionTime":"2025-11-25T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.728001 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.728103 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.728122 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.728182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.728202 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:16Z","lastTransitionTime":"2025-11-25T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.831475 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.831524 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.831537 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.831558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.831572 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:16Z","lastTransitionTime":"2025-11-25T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.935778 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.935848 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.935866 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.935895 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:16 crc kubenswrapper[4910]: I1125 21:32:16.935915 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:16Z","lastTransitionTime":"2025-11-25T21:32:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.040122 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.040280 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.040302 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.040331 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.040348 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:17Z","lastTransitionTime":"2025-11-25T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.144222 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.144332 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.144351 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.144385 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.144406 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:17Z","lastTransitionTime":"2025-11-25T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.204497 4910 scope.go:117] "RemoveContainer" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" Nov 25 21:32:17 crc kubenswrapper[4910]: E1125 21:32:17.204941 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.248697 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.248775 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.248794 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.248821 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.248843 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:17Z","lastTransitionTime":"2025-11-25T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.352823 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.352916 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.352941 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.352971 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.352990 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:17Z","lastTransitionTime":"2025-11-25T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.456641 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.456728 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.456752 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.456785 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.456807 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:17Z","lastTransitionTime":"2025-11-25T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.560442 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.560502 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.560521 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.560550 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.560573 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:17Z","lastTransitionTime":"2025-11-25T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.663604 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.663670 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.663688 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.663716 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.663735 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:17Z","lastTransitionTime":"2025-11-25T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.766951 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.767013 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.767030 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.767059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.767078 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:17Z","lastTransitionTime":"2025-11-25T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.871229 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.871331 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.871351 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.871381 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.871403 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:17Z","lastTransitionTime":"2025-11-25T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.974629 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.974688 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.974701 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.974725 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:17 crc kubenswrapper[4910]: I1125 21:32:17.974738 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:17Z","lastTransitionTime":"2025-11-25T21:32:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.078726 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.078809 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.078837 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.078870 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.078893 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:18Z","lastTransitionTime":"2025-11-25T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.182428 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.182494 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.182514 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.182541 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.182560 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:18Z","lastTransitionTime":"2025-11-25T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.203939 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.204012 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.204033 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.204044 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:18 crc kubenswrapper[4910]: E1125 21:32:18.204153 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:18 crc kubenswrapper[4910]: E1125 21:32:18.204338 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:18 crc kubenswrapper[4910]: E1125 21:32:18.204474 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:18 crc kubenswrapper[4910]: E1125 21:32:18.204537 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.286712 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.286779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.286791 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.286829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.286848 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:18Z","lastTransitionTime":"2025-11-25T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.390487 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.390530 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.390539 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.390554 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.390566 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:18Z","lastTransitionTime":"2025-11-25T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.493833 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.493897 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.493915 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.493941 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.493958 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:18Z","lastTransitionTime":"2025-11-25T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.596849 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.596924 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.596947 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.596975 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.596996 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:18Z","lastTransitionTime":"2025-11-25T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.700055 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.700114 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.700126 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.700142 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.700156 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:18Z","lastTransitionTime":"2025-11-25T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.802109 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.802172 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.802191 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.802217 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.802238 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:18Z","lastTransitionTime":"2025-11-25T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.905073 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.905112 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.905120 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.905135 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:18 crc kubenswrapper[4910]: I1125 21:32:18.905143 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:18Z","lastTransitionTime":"2025-11-25T21:32:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.007458 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.007537 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.007563 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.007592 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.007609 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:19Z","lastTransitionTime":"2025-11-25T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.110450 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.110521 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.110538 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.110563 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.110581 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:19Z","lastTransitionTime":"2025-11-25T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.213043 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.213116 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.213134 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.213159 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.213179 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:19Z","lastTransitionTime":"2025-11-25T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.316230 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.316281 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.316292 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.316311 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.316327 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:19Z","lastTransitionTime":"2025-11-25T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.423512 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.423779 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.423805 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.423824 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.423870 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:19Z","lastTransitionTime":"2025-11-25T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.527676 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.527739 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.527763 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.527793 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.527815 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:19Z","lastTransitionTime":"2025-11-25T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.630985 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.631048 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.631067 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.631088 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.631104 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:19Z","lastTransitionTime":"2025-11-25T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.733395 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.733457 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.733473 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.733495 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.733513 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:19Z","lastTransitionTime":"2025-11-25T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.835155 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.835200 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.835210 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.835227 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.835237 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:19Z","lastTransitionTime":"2025-11-25T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.938346 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.938412 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.938423 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.938437 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:19 crc kubenswrapper[4910]: I1125 21:32:19.938446 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:19Z","lastTransitionTime":"2025-11-25T21:32:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.041654 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.041698 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.041714 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.041737 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.041756 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:20Z","lastTransitionTime":"2025-11-25T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.144815 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.144863 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.144876 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.144895 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.144906 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:20Z","lastTransitionTime":"2025-11-25T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.203491 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.203591 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.203643 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.203643 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:20 crc kubenswrapper[4910]: E1125 21:32:20.203853 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:20 crc kubenswrapper[4910]: E1125 21:32:20.203949 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:20 crc kubenswrapper[4910]: E1125 21:32:20.204142 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:20 crc kubenswrapper[4910]: E1125 21:32:20.204491 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.247848 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.247956 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.247974 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.249064 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.249084 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:20Z","lastTransitionTime":"2025-11-25T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.351408 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.351451 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.351462 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.351482 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.351494 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:20Z","lastTransitionTime":"2025-11-25T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.454684 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.454721 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.454729 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.454742 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.454751 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:20Z","lastTransitionTime":"2025-11-25T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.558622 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.558696 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.558718 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.558749 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.558768 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:20Z","lastTransitionTime":"2025-11-25T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.662129 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.662178 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.662190 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.662207 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.662217 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:20Z","lastTransitionTime":"2025-11-25T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.765746 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.765817 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.765829 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.765851 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.765865 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:20Z","lastTransitionTime":"2025-11-25T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.869131 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.869208 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.869227 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.869304 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.869325 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:20Z","lastTransitionTime":"2025-11-25T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.972858 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.972901 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.972912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.972929 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:20 crc kubenswrapper[4910]: I1125 21:32:20.972940 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:20Z","lastTransitionTime":"2025-11-25T21:32:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.076131 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.076214 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.076233 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.076293 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.076315 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:21Z","lastTransitionTime":"2025-11-25T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.179609 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.179680 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.179699 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.179728 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.179747 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:21Z","lastTransitionTime":"2025-11-25T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.282945 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.283032 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.283046 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.283066 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.283078 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:21Z","lastTransitionTime":"2025-11-25T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.386176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.386287 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.386308 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.386340 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.386361 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:21Z","lastTransitionTime":"2025-11-25T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.489366 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.489440 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.489458 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.489486 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.489504 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:21Z","lastTransitionTime":"2025-11-25T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.592014 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.592060 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.592070 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.592093 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.592103 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:21Z","lastTransitionTime":"2025-11-25T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.694423 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.694467 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.694478 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.694492 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.694501 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:21Z","lastTransitionTime":"2025-11-25T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.797526 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.797585 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.797606 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.797627 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.797643 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:21Z","lastTransitionTime":"2025-11-25T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.900548 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.900705 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.900720 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.900735 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:21 crc kubenswrapper[4910]: I1125 21:32:21.900745 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:21Z","lastTransitionTime":"2025-11-25T21:32:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.003529 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.003578 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.003590 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.003610 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.003623 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:22Z","lastTransitionTime":"2025-11-25T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.106525 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.106763 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.106809 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.106853 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.106880 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:22Z","lastTransitionTime":"2025-11-25T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.203659 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.203817 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.203998 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:32:22 crc kubenswrapper[4910]: E1125 21:32:22.203998 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.204025 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 21:32:22 crc kubenswrapper[4910]: E1125 21:32:22.204127 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 25 21:32:22 crc kubenswrapper[4910]: E1125 21:32:22.204311 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9"
Nov 25 21:32:22 crc kubenswrapper[4910]: E1125 21:32:22.204360 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.209496 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.209537 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.209548 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.209563 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.209578 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:22Z","lastTransitionTime":"2025-11-25T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.311171 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.311215 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.311223 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.311237 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.311265 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:22Z","lastTransitionTime":"2025-11-25T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.413511 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.413556 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.413565 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.413579 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.413588 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:22Z","lastTransitionTime":"2025-11-25T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.516442 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.516516 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.516535 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.516566 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.516587 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:22Z","lastTransitionTime":"2025-11-25T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.619692 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.619741 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.619750 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.619764 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.619773 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:22Z","lastTransitionTime":"2025-11-25T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.722077 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.722148 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.722164 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.722190 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.722207 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:22Z","lastTransitionTime":"2025-11-25T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.824539 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.824665 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.824683 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.824706 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.824724 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:22Z","lastTransitionTime":"2025-11-25T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.927270 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.927308 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.927316 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.927328 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:22 crc kubenswrapper[4910]: I1125 21:32:22.927338 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:22Z","lastTransitionTime":"2025-11-25T21:32:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.030372 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.030409 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.030418 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.030432 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.030444 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.133840 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.133886 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.133899 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.133916 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.133930 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.236420 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.236464 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.236478 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.236491 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.236504 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.339288 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.339354 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.339374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.339398 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.339416 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.442350 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.442397 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.442405 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.442419 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.442428 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.545920 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.546033 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.546059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.546098 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.546124 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.650022 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.650102 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.650141 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.650181 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.650206 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.713928 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.714047 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.714073 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.714115 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.714141 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:23 crc kubenswrapper[4910]: E1125 21:32:23.736122 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:23Z is after 2025-08-24T17:21:41Z"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.741053 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.741182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.741210 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.741239 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.741312 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:23 crc kubenswrapper[4910]: E1125 21:32:23.764314 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:23Z is after 2025-08-24T17:21:41Z"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.769608 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.769652 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.769663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.769688 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.769700 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 25 21:32:23 crc kubenswrapper[4910]: E1125 21:32:23.789145 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:23Z is after 2025-08-24T17:21:41Z"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.793623 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.793674 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
event="NodeHasNoDiskPressure" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.793691 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.793715 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.793733 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:23 crc kubenswrapper[4910]: E1125 21:32:23.814028 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:23Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.824298 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.824357 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.824500 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.824575 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.824623 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:23 crc kubenswrapper[4910]: E1125 21:32:23.839688 4910 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-25T21:32:23Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"2a34d4b8-8687-4ac8-90d3-67253e425782\\\",\\\"systemUUID\\\":\\\"b9e4d4d5-ee6d-4a38-9671-95a95b18ac40\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:23Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:23 crc kubenswrapper[4910]: E1125 21:32:23.840319 4910 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.842651 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.842735 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.842761 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.842787 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.842844 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.945932 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.946014 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.946031 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.946084 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:23 crc kubenswrapper[4910]: I1125 21:32:23.946103 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:23Z","lastTransitionTime":"2025-11-25T21:32:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.049722 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.049773 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.049784 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.049804 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.049816 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:24Z","lastTransitionTime":"2025-11-25T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.153118 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.153182 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.153203 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.153232 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.153290 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:24Z","lastTransitionTime":"2025-11-25T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.203882 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.203954 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.203970 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.204032 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:24 crc kubenswrapper[4910]: E1125 21:32:24.204143 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:24 crc kubenswrapper[4910]: E1125 21:32:24.204537 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:24 crc kubenswrapper[4910]: E1125 21:32:24.204684 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:24 crc kubenswrapper[4910]: E1125 21:32:24.204398 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.255924 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.256015 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.256050 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.256076 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.256087 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:24Z","lastTransitionTime":"2025-11-25T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.358897 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.358923 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.358931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.358944 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.358952 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:24Z","lastTransitionTime":"2025-11-25T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.461512 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.461577 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.461594 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.461663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.461687 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:24Z","lastTransitionTime":"2025-11-25T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.564693 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.564776 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.564802 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.564836 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.564860 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:24Z","lastTransitionTime":"2025-11-25T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.667909 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.667959 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.667969 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.667983 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.667992 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:24Z","lastTransitionTime":"2025-11-25T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.770010 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.770077 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.770095 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.770121 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.770140 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:24Z","lastTransitionTime":"2025-11-25T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.862965 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:24 crc kubenswrapper[4910]: E1125 21:32:24.863149 4910 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:32:24 crc kubenswrapper[4910]: E1125 21:32:24.863226 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs podName:72d787b6-8fd2-4a83-9e8f-2654fdad81c9 nodeName:}" failed. No retries permitted until 2025-11-25 21:33:28.863204757 +0000 UTC m=+164.325681119 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs") pod "network-metrics-daemon-m4q5p" (UID: "72d787b6-8fd2-4a83-9e8f-2654fdad81c9") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.872910 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.872963 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.872980 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.873006 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.873023 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:24Z","lastTransitionTime":"2025-11-25T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.975189 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.975314 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.975391 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.975420 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:24 crc kubenswrapper[4910]: I1125 21:32:24.975840 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:24Z","lastTransitionTime":"2025-11-25T21:32:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.078022 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.078098 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.078123 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.078153 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.078172 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:25Z","lastTransitionTime":"2025-11-25T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.180785 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.180844 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.180860 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.180882 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.180897 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:25Z","lastTransitionTime":"2025-11-25T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.222131 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.243461 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.256828 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jngcr" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"08d06fc8-cc2c-4b86-a391-f6cb96fad95c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a6519ba042bbd74d14cc189fd34c97fd11925e282585e33e679a24cdd28aea84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"container
ID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4f3fdb7cd05b03869e0970742b25a276aec2fe5f015cb4a9e435712e726472f3\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7bf30f45082549a07a9e85a7bd844dac21db5406a9f63efe51e00f71c4946318\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4459b24870241280fe1f2b062a3b5f7e3bead99df5186887b0d641c85feeeb47\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8c39891ca69df72add314e4927d4209779eec424d5c105f6438ed7f6608a9874\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c7594a8611bee2d4918479c50caa7f325325b1f109054e7da40a4c71e1506d88\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true
,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://84c9451780791e03aa26da303730576d0d41fdb210d38ba4712f0611915dafb9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zsrh2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jngcr\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.268602 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4d503d4a-1acf-4d36-a9fc-a33c8255e4ff\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://27684287b1ed7ca90bb684de51efab670587f3bbabd0007710a2cae1a83d58e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c2f06ae104bf42de34f635a712f34b8533cc4137ccc7ad413c916b7979f05182\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev
/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ml2qb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:19Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-wp6p4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.282202 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7faead0-20c9-4f7d-a632-16cda08af34b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\\\",\\\"image\\\"
:\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"message\\\":\\\"le observer\\\\nW1125 21:31:05.522802 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1125 21:31:05.522939 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1125 21:31:05.523702 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-3203126402/tls.crt::/tmp/serving-cert-3203126402/tls.key\\\\\\\"\\\\nI1125 21:31:05.965072 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1125 21:31:05.967851 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1125 21:31:05.967873 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1125 21:31:05.967895 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1125 21:31:05.967902 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1125 21:31:05.975885 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1125 21:31:05.975907 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975913 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1125 21:31:05.975918 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1125 21:31:05.975922 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1125 21:31:05.975925 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1125 21:31:05.975929 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1125 21:31:05.976088 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1125 21:31:05.977135 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:59Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:48Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.283500 4910 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.283556 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.283568 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.283584 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.283621 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:25Z","lastTransitionTime":"2025-11-25T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.295376 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"53b0498b-4a22-4761-b405-8a357b1209f1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:05Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2097f4e48cecf28f5119cac82e7c3327ffb988ebd0b496106df0f76600d42307\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":
\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://12f4009be271c3052bdef86a438bbc619cbb81336075ac73abc7e9eb9348c3fc\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f5a56bfd3d3925b2010febfd82756c2e281d71338898a88e4e9ef8a36be1d09\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.307317 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-lpz8j" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1ab2bfbf-87b6-418b-b6b9-707dd9239acc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://97d93e2fa9da6eab98488ee6a90882bb9afa459c2bb9ee877afabf1e99b6f87e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bczvw\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-lpz8j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.319563 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:20Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-5c4sn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:20Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-m4q5p\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.335544 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d9998df3df4366a6462a3917d298c363a328110f57596acd1bfbfaceb29a9f54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.350334 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"89c4a6ab-992c-467f-92fe-1111582e1b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3553e9caf2a3ed5d54679e2589e8bfb41cc95cbc202f5d9edd2772a4adba0806\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-8m6kf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-g8f4t\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.375451 4910 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:04Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.386506 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.386554 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.386563 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.386582 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.386594 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:25Z","lastTransitionTime":"2025-11-25T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.391283 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0d4f724-3075-4ed5-9fbf-0be4d8f90ae1\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:39Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5b5e51a456a225217f89ee36a8f88095352ef89c81aed13a59d2df6906194e00\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a0ebb4575cb0c108c3c669085d2c368e7be4df12aa58d929d49d495f21718f35\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://e9997d0e99ead1f836cef297dc7e4a03323addaae2fdd218e57f3e4304316e4a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://47b691803fafe3a9eac6579b194ce961fa1e40deecc9432b708d39db8f183a8e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.407021 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://847092a89786cf51d15c33a9de19645ff3733c8210d6401df3b2d5aa6936fcfe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.421737 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:06Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0145cf85cbf87eb7845712ed33c216ea98c765234856d2567a1e2f838ed9a2db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c34bc1eeb2275b32db64f93c1265fa850b59ef9536547e1434bb523ae6915ada\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:06Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 
2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.437491 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-gqjcx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"751fe267-dc17-4de7-81e9-a8caab9e9817\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:31:54Z\\\",\\\"message\\\":\\\"2025-11-25T21:31:09+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8\\\\n2025-11-25T21:31:09+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_ea0107e6-67a5-4fb9-bd59-603f9b8d26f8 to /host/opt/cni/bin/\\\\n2025-11-25T21:31:09Z [verbose] multus-daemon started\\\\n2025-11-25T21:31:09Z [verbose] Readiness Indicator file check\\\\n2025-11-25T21:31:54Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cf2sq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-multus\"/\"multus-gqjcx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.456920 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4cf48d68-85c8-45e7-8533-550e120eca12\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-25T21:32:02Z\\\",\\\"message\\\":\\\"365] Adding new object: *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc\\\\nI1125 21:32:02.984384 6947 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-lpz8j after 0 failed attempt(s)\\\\nI1125 21:32:02.984388 6947 ovn.go:134] Ensuring zone local for Pod openshift-kube-controller-manager/kube-controller-manager-crc in node crc\\\\nI1125 21:32:02.984389 6947 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-lpz8j\\\\nF1125 21:32:02.984392 6947 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:02Z is after 2025-08-24T17:21:41Z]\\\\nI1125 21:32\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-25T21:32:02Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:31:07Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:31:07Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ptdrh\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:07Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cvj2j\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.471350 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-rffgq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"6a03281c-d203-4d00-a8d8-c6ac28edd03b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:14Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:31:15Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://719e7d9d6abde3fbfd500d9493f62ce2f7e085897aef9c6ddaed1bd932ab11dd\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:31:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2xbf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:31:14Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-rffgq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.485545 4910 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9cb75650-a608-44a0-8172-2befbef5cf39\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-25T21:30:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1856abcdc2d9828f760e18deb42cc996ab372e3f7ca2f560f4df9b02ac1dbb71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-25T21:30:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://42dcae1e422c140f201d055b514920c37ef165bc83ace379b17311278469d953\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://42dcae1e422c140f201d055b514920c37ef165bc83ace379b17311278469d953\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-25T21:30:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-25T21:30:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Runni
ng\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-25T21:30:45Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-25T21:32:25Z is after 2025-08-24T17:21:41Z" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.489102 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.489200 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.489287 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.489370 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.489447 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:25Z","lastTransitionTime":"2025-11-25T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.591947 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.592014 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.592032 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.592055 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.592073 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:25Z","lastTransitionTime":"2025-11-25T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.694187 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.694234 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.694283 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.694306 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.694325 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:25Z","lastTransitionTime":"2025-11-25T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.796756 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.796805 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.796823 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.796844 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.796859 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:25Z","lastTransitionTime":"2025-11-25T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.900048 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.900110 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.900128 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.900153 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:25 crc kubenswrapper[4910]: I1125 21:32:25.900173 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:25Z","lastTransitionTime":"2025-11-25T21:32:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.003769 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.003827 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.003844 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.003870 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.003891 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:26Z","lastTransitionTime":"2025-11-25T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.106820 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.106882 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.106898 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.106921 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.106938 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:26Z","lastTransitionTime":"2025-11-25T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.203967 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.204081 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:26 crc kubenswrapper[4910]: E1125 21:32:26.204159 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.204187 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:26 crc kubenswrapper[4910]: E1125 21:32:26.204401 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.204446 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:26 crc kubenswrapper[4910]: E1125 21:32:26.204590 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:26 crc kubenswrapper[4910]: E1125 21:32:26.204693 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.209658 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.209696 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.209707 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.209725 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.209736 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:26Z","lastTransitionTime":"2025-11-25T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.312340 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.312377 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.312386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.312400 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.312409 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:26Z","lastTransitionTime":"2025-11-25T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.415586 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.415620 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.415631 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.415646 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.415655 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:26Z","lastTransitionTime":"2025-11-25T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.519176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.519284 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.519309 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.519341 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.519364 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:26Z","lastTransitionTime":"2025-11-25T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.621842 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.621879 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.621889 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.621937 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.621951 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:26Z","lastTransitionTime":"2025-11-25T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.724652 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.724720 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.724744 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.724770 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.724788 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:26Z","lastTransitionTime":"2025-11-25T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.827100 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.827171 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.827194 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.827223 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.827284 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:26Z","lastTransitionTime":"2025-11-25T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.929627 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.929688 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.929706 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.929731 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:26 crc kubenswrapper[4910]: I1125 21:32:26.929749 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:26Z","lastTransitionTime":"2025-11-25T21:32:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.032386 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.032471 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.032487 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.032504 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.032516 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:27Z","lastTransitionTime":"2025-11-25T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.135093 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.135130 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.135142 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.135157 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.135168 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:27Z","lastTransitionTime":"2025-11-25T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.237776 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.237819 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.237828 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.237844 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.237855 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:27Z","lastTransitionTime":"2025-11-25T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.340570 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.340795 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.340975 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.341045 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.341099 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:27Z","lastTransitionTime":"2025-11-25T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.444074 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.444435 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.444605 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.444775 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.444899 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:27Z","lastTransitionTime":"2025-11-25T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.547390 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.547454 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.547471 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.547494 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.547510 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:27Z","lastTransitionTime":"2025-11-25T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.650044 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.650069 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.650077 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.650090 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.650100 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:27Z","lastTransitionTime":"2025-11-25T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.752894 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.752966 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.752989 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.753020 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.753043 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:27Z","lastTransitionTime":"2025-11-25T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.855091 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.855123 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.855131 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.855145 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.855153 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:27Z","lastTransitionTime":"2025-11-25T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.957559 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.957608 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.957619 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.957637 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:27 crc kubenswrapper[4910]: I1125 21:32:27.957647 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:27Z","lastTransitionTime":"2025-11-25T21:32:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.060189 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.060237 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.060298 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.060322 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.060339 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:28Z","lastTransitionTime":"2025-11-25T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.162883 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.162922 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.162932 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.162948 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.162960 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:28Z","lastTransitionTime":"2025-11-25T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.203335 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:28 crc kubenswrapper[4910]: E1125 21:32:28.203520 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.203859 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.203875 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:28 crc kubenswrapper[4910]: E1125 21:32:28.204333 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.204337 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:28 crc kubenswrapper[4910]: E1125 21:32:28.204538 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:28 crc kubenswrapper[4910]: E1125 21:32:28.204522 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.217503 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.265663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.265954 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.266084 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.266198 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.266365 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:28Z","lastTransitionTime":"2025-11-25T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.369395 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.369451 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.369471 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.369495 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.369512 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:28Z","lastTransitionTime":"2025-11-25T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.472727 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.472777 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.472793 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.472814 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.472832 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:28Z","lastTransitionTime":"2025-11-25T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.576375 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.576499 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.576530 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.576558 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.576580 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:28Z","lastTransitionTime":"2025-11-25T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.679503 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.679577 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.679599 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.679628 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.679650 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:28Z","lastTransitionTime":"2025-11-25T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.782629 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.782676 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.782688 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.782705 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.782717 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:28Z","lastTransitionTime":"2025-11-25T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.885216 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.885276 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.885287 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.885305 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.885316 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:28Z","lastTransitionTime":"2025-11-25T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.988414 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.988491 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.988515 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.988543 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:28 crc kubenswrapper[4910]: I1125 21:32:28.988567 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:28Z","lastTransitionTime":"2025-11-25T21:32:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.091581 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.091629 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.091645 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.091667 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.091685 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:29Z","lastTransitionTime":"2025-11-25T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.194820 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.194867 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.194879 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.194896 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.194908 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:29Z","lastTransitionTime":"2025-11-25T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.298299 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.298352 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.298370 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.298392 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.298408 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:29Z","lastTransitionTime":"2025-11-25T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.400571 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.400641 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.400663 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.400691 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.400712 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:29Z","lastTransitionTime":"2025-11-25T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.503316 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.503391 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.503473 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.503565 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.503601 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:29Z","lastTransitionTime":"2025-11-25T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.607668 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.607805 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.607845 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.607877 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.607900 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:29Z","lastTransitionTime":"2025-11-25T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.712016 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.712155 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.712176 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.712207 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.712232 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:29Z","lastTransitionTime":"2025-11-25T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.815754 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.815809 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.815820 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.815838 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.815852 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:29Z","lastTransitionTime":"2025-11-25T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.919894 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.919977 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.920003 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.920038 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:29 crc kubenswrapper[4910]: I1125 21:32:29.920062 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:29Z","lastTransitionTime":"2025-11-25T21:32:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.022609 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.022671 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.022688 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.022710 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.022727 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:30Z","lastTransitionTime":"2025-11-25T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.125699 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.125771 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.125789 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.125814 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.125832 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:30Z","lastTransitionTime":"2025-11-25T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.203624 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.203787 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.203836 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.203848 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:30 crc kubenswrapper[4910]: E1125 21:32:30.204150 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:30 crc kubenswrapper[4910]: E1125 21:32:30.204345 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:30 crc kubenswrapper[4910]: E1125 21:32:30.204549 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:30 crc kubenswrapper[4910]: E1125 21:32:30.204708 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.228425 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.228490 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.228509 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.228531 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.228549 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:30Z","lastTransitionTime":"2025-11-25T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.331016 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.331079 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.331102 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.331132 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.331151 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:30Z","lastTransitionTime":"2025-11-25T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.434732 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.434810 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.434835 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.434866 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.434888 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:30Z","lastTransitionTime":"2025-11-25T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.537998 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.538057 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.538083 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.538115 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.538135 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:30Z","lastTransitionTime":"2025-11-25T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.641328 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.641403 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.641424 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.641456 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.641475 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:30Z","lastTransitionTime":"2025-11-25T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.744381 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.744445 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.744469 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.744503 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.744528 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:30Z","lastTransitionTime":"2025-11-25T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.847535 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.847594 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.847610 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.847634 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.847654 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:30Z","lastTransitionTime":"2025-11-25T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.950308 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.950559 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.950645 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.950784 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:30 crc kubenswrapper[4910]: I1125 21:32:30.950860 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:30Z","lastTransitionTime":"2025-11-25T21:32:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.053605 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.053664 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.053702 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.053735 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.053758 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:31Z","lastTransitionTime":"2025-11-25T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.157062 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.157127 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.157149 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.157179 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.157199 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:31Z","lastTransitionTime":"2025-11-25T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.260760 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.260804 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.260816 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.260832 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.260843 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:31Z","lastTransitionTime":"2025-11-25T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.364009 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.364047 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.364059 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.364075 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.364087 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:31Z","lastTransitionTime":"2025-11-25T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.486403 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.486459 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.486480 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.486512 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.486534 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:31Z","lastTransitionTime":"2025-11-25T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.588619 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.588653 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.588662 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.588676 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.588686 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:31Z","lastTransitionTime":"2025-11-25T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.691216 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.691285 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.691297 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.691316 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.691332 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:31Z","lastTransitionTime":"2025-11-25T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.794619 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.794674 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.794689 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.794712 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.794729 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:31Z","lastTransitionTime":"2025-11-25T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.896904 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.896962 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.896973 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.896987 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:31 crc kubenswrapper[4910]: I1125 21:32:31.896996 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:31Z","lastTransitionTime":"2025-11-25T21:32:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.000201 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.000318 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.000343 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.000374 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.000398 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:32Z","lastTransitionTime":"2025-11-25T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.102409 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.102465 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.102476 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.102493 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.102505 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:32Z","lastTransitionTime":"2025-11-25T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.203386 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.203444 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.203450 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:32 crc kubenswrapper[4910]: E1125 21:32:32.203591 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.203606 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.204153 4910 scope.go:117] "RemoveContainer" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" Nov 25 21:32:32 crc kubenswrapper[4910]: E1125 21:32:32.204298 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-cvj2j_openshift-ovn-kubernetes(4cf48d68-85c8-45e7-8533-550e120eca12)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" Nov 25 21:32:32 crc kubenswrapper[4910]: E1125 21:32:32.204419 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:32 crc kubenswrapper[4910]: E1125 21:32:32.204475 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:32 crc kubenswrapper[4910]: E1125 21:32:32.204510 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.205014 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.205056 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.205068 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.205082 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.205094 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:32Z","lastTransitionTime":"2025-11-25T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.307969 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.308003 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.308012 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.308025 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.308033 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:32Z","lastTransitionTime":"2025-11-25T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.410827 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.410896 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.410912 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.410931 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.410945 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:32Z","lastTransitionTime":"2025-11-25T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.513720 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.513766 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.513778 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.513795 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.513809 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:32Z","lastTransitionTime":"2025-11-25T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.617151 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.617196 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.617207 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.617276 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.617287 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:32Z","lastTransitionTime":"2025-11-25T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.719626 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.719666 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.719691 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.719708 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.719717 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:32Z","lastTransitionTime":"2025-11-25T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.821692 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.821735 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.821746 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.821762 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.821774 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:32Z","lastTransitionTime":"2025-11-25T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.924467 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.924579 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.924598 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.924623 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:32 crc kubenswrapper[4910]: I1125 21:32:32.924640 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:32Z","lastTransitionTime":"2025-11-25T21:32:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.027896 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.027939 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.027950 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.027968 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.027980 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:33Z","lastTransitionTime":"2025-11-25T21:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.130560 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.130612 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.130627 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.130648 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.130666 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:33Z","lastTransitionTime":"2025-11-25T21:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.233138 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.233185 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.233197 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.233215 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.233226 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:33Z","lastTransitionTime":"2025-11-25T21:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.335642 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.335699 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.335710 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.335729 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.335741 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:33Z","lastTransitionTime":"2025-11-25T21:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.438672 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.438704 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.438715 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.438732 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.438745 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:33Z","lastTransitionTime":"2025-11-25T21:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.541509 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.541570 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.541590 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.541621 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.541645 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:33Z","lastTransitionTime":"2025-11-25T21:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.643893 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.643945 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.643962 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.643986 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.644003 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:33Z","lastTransitionTime":"2025-11-25T21:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.746818 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.746958 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.746979 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.747005 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.747021 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:33Z","lastTransitionTime":"2025-11-25T21:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.850221 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.850357 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.850387 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.850418 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.850435 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:33Z","lastTransitionTime":"2025-11-25T21:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.953923 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.953984 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.954005 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.954028 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:33 crc kubenswrapper[4910]: I1125 21:32:33.954045 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:33Z","lastTransitionTime":"2025-11-25T21:32:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.056759 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.056807 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.056819 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.056836 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.056849 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:34Z","lastTransitionTime":"2025-11-25T21:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.158692 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.158736 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.158745 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.158760 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.158770 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:34Z","lastTransitionTime":"2025-11-25T21:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.201138 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.201175 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.201186 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.201204 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.201215 4910 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-25T21:32:34Z","lastTransitionTime":"2025-11-25T21:32:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.202862 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.202904 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.202914 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.202918 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:34 crc kubenswrapper[4910]: E1125 21:32:34.203022 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:34 crc kubenswrapper[4910]: E1125 21:32:34.203322 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:34 crc kubenswrapper[4910]: E1125 21:32:34.203493 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:34 crc kubenswrapper[4910]: E1125 21:32:34.203618 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.252208 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f"] Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.252749 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.257959 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.258232 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.258345 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.258441 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.307652 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podStartSLOduration=89.307629733 podStartE2EDuration="1m29.307629733s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.306819343 +0000 UTC m=+109.769295675" watchObservedRunningTime="2025-11-25 21:32:34.307629733 +0000 UTC m=+109.770106065" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.331540 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-lpz8j" podStartSLOduration=89.331512616 podStartE2EDuration="1m29.331512616s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.319089117 +0000 UTC m=+109.781565449" watchObservedRunningTime="2025-11-25 21:32:34.331512616 +0000 UTC m=+109.793988948" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.366349 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8f9f109-8c24-4d1e-b85f-377472433e6d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.366398 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: 
\"kubernetes.io/host-path/f8f9f109-8c24-4d1e-b85f-377472433e6d-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.366448 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f8f9f109-8c24-4d1e-b85f-377472433e6d-service-ca\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.366538 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f8f9f109-8c24-4d1e-b85f-377472433e6d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.366570 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f8f9f109-8c24-4d1e-b85f-377472433e6d-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.410462 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-gqjcx" podStartSLOduration=89.410446007 podStartE2EDuration="1m29.410446007s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.376047033 +0000 UTC m=+109.838523365" watchObservedRunningTime="2025-11-25 21:32:34.410446007 +0000 UTC m=+109.872922329" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.429718 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-rffgq" podStartSLOduration=89.429697446 podStartE2EDuration="1m29.429697446s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.429198863 +0000 UTC m=+109.891675205" watchObservedRunningTime="2025-11-25 21:32:34.429697446 +0000 UTC m=+109.892173788" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.467138 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f8f9f109-8c24-4d1e-b85f-377472433e6d-service-ca\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.467213 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f8f9f109-8c24-4d1e-b85f-377472433e6d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.467257 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f8f9f109-8c24-4d1e-b85f-377472433e6d-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.467308 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8f9f109-8c24-4d1e-b85f-377472433e6d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.467331 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f8f9f109-8c24-4d1e-b85f-377472433e6d-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.467384 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/f8f9f109-8c24-4d1e-b85f-377472433e6d-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.467501 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/f8f9f109-8c24-4d1e-b85f-377472433e6d-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.467951 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f8f9f109-8c24-4d1e-b85f-377472433e6d-service-ca\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.471827 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f8f9f109-8c24-4d1e-b85f-377472433e6d-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.489923 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=20.489905502 podStartE2EDuration="20.489905502s" podCreationTimestamp="2025-11-25 21:32:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.451764774 +0000 UTC 
m=+109.914241106" watchObservedRunningTime="2025-11-25 21:32:34.489905502 +0000 UTC m=+109.952381824" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.490005 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=6.490000764 podStartE2EDuration="6.490000764s" podCreationTimestamp="2025-11-25 21:32:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.489770608 +0000 UTC m=+109.952246930" watchObservedRunningTime="2025-11-25 21:32:34.490000764 +0000 UTC m=+109.952477086" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.499336 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f8f9f109-8c24-4d1e-b85f-377472433e6d-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-qxj2f\" (UID: \"f8f9f109-8c24-4d1e-b85f-377472433e6d\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.504619 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=55.504600847 podStartE2EDuration="55.504600847s" podCreationTimestamp="2025-11-25 21:31:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.503415727 +0000 UTC m=+109.965892049" watchObservedRunningTime="2025-11-25 21:32:34.504600847 +0000 UTC m=+109.967077169" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.539224 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-jngcr" podStartSLOduration=89.539208647 podStartE2EDuration="1m29.539208647s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.538685574 +0000 UTC m=+110.001161896" watchObservedRunningTime="2025-11-25 21:32:34.539208647 +0000 UTC m=+110.001684969" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.563425 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-wp6p4" podStartSLOduration=88.563407228 podStartE2EDuration="1m28.563407228s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.549304387 +0000 UTC m=+110.011780709" watchObservedRunningTime="2025-11-25 21:32:34.563407228 +0000 UTC m=+110.025883550" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.571274 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.576329 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=89.576315188 podStartE2EDuration="1m29.576315188s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.576179205 +0000 UTC m=+110.038655527" watchObservedRunningTime="2025-11-25 21:32:34.576315188 +0000 UTC m=+110.038791520" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.576590 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=88.576583935 podStartE2EDuration="1m28.576583935s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.563584402 +0000 UTC m=+110.026060734" watchObservedRunningTime="2025-11-25 21:32:34.576583935 +0000 UTC m=+110.039060267" Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.888755 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" event={"ID":"f8f9f109-8c24-4d1e-b85f-377472433e6d","Type":"ContainerStarted","Data":"728acb22c5806c86a70a7742545a62d2a0247c07e2cab75b9cc0788e995c9f67"} Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.888821 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" event={"ID":"f8f9f109-8c24-4d1e-b85f-377472433e6d","Type":"ContainerStarted","Data":"c5e00a34d6b3232344a5c95e1aaa37cf484192f69987d2af8146c447bbe33509"} Nov 25 21:32:34 crc kubenswrapper[4910]: I1125 21:32:34.907806 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-qxj2f" podStartSLOduration=89.907777084 podStartE2EDuration="1m29.907777084s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:34.906629665 +0000 UTC m=+110.369105997" watchObservedRunningTime="2025-11-25 21:32:34.907777084 +0000 UTC m=+110.370253456" Nov 25 21:32:36 crc kubenswrapper[4910]: I1125 21:32:36.203724 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:36 crc kubenswrapper[4910]: I1125 21:32:36.203776 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:36 crc kubenswrapper[4910]: I1125 21:32:36.203824 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:36 crc kubenswrapper[4910]: I1125 21:32:36.203787 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:36 crc kubenswrapper[4910]: E1125 21:32:36.203933 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:36 crc kubenswrapper[4910]: E1125 21:32:36.204039 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:36 crc kubenswrapper[4910]: E1125 21:32:36.204410 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:36 crc kubenswrapper[4910]: E1125 21:32:36.204505 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:38 crc kubenswrapper[4910]: I1125 21:32:38.203805 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:38 crc kubenswrapper[4910]: I1125 21:32:38.203851 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:38 crc kubenswrapper[4910]: E1125 21:32:38.204332 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:38 crc kubenswrapper[4910]: I1125 21:32:38.203939 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:38 crc kubenswrapper[4910]: I1125 21:32:38.203922 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:38 crc kubenswrapper[4910]: E1125 21:32:38.204508 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:38 crc kubenswrapper[4910]: E1125 21:32:38.204624 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:38 crc kubenswrapper[4910]: E1125 21:32:38.204739 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:40 crc kubenswrapper[4910]: I1125 21:32:40.203884 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:40 crc kubenswrapper[4910]: E1125 21:32:40.204182 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:40 crc kubenswrapper[4910]: I1125 21:32:40.205360 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:40 crc kubenswrapper[4910]: I1125 21:32:40.205428 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:40 crc kubenswrapper[4910]: I1125 21:32:40.205448 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:40 crc kubenswrapper[4910]: E1125 21:32:40.205940 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:40 crc kubenswrapper[4910]: E1125 21:32:40.206053 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:40 crc kubenswrapper[4910]: E1125 21:32:40.205686 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:40 crc kubenswrapper[4910]: I1125 21:32:40.909070 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gqjcx_751fe267-dc17-4de7-81e9-a8caab9e9817/kube-multus/1.log" Nov 25 21:32:40 crc kubenswrapper[4910]: I1125 21:32:40.909600 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gqjcx_751fe267-dc17-4de7-81e9-a8caab9e9817/kube-multus/0.log" Nov 25 21:32:40 crc kubenswrapper[4910]: I1125 21:32:40.909656 4910 generic.go:334] "Generic (PLEG): container finished" podID="751fe267-dc17-4de7-81e9-a8caab9e9817" containerID="1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660" exitCode=1 Nov 25 21:32:40 crc kubenswrapper[4910]: I1125 21:32:40.909691 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gqjcx" event={"ID":"751fe267-dc17-4de7-81e9-a8caab9e9817","Type":"ContainerDied","Data":"1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660"} Nov 25 21:32:40 crc kubenswrapper[4910]: I1125 21:32:40.909736 4910 scope.go:117] "RemoveContainer" containerID="7ac8e70b18d055e4f3763cf9c8a0549deb14290e8e0893fa03b9236dada848ff" Nov 25 21:32:40 crc kubenswrapper[4910]: I1125 21:32:40.910309 4910 scope.go:117] "RemoveContainer" containerID="1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660" Nov 25 21:32:40 crc kubenswrapper[4910]: E1125 21:32:40.910573 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-gqjcx_openshift-multus(751fe267-dc17-4de7-81e9-a8caab9e9817)\"" pod="openshift-multus/multus-gqjcx" podUID="751fe267-dc17-4de7-81e9-a8caab9e9817" Nov 25 21:32:41 crc kubenswrapper[4910]: I1125 21:32:41.914640 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gqjcx_751fe267-dc17-4de7-81e9-a8caab9e9817/kube-multus/1.log" Nov 25 21:32:42 crc kubenswrapper[4910]: I1125 21:32:42.203591 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:42 crc kubenswrapper[4910]: I1125 21:32:42.203617 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:42 crc kubenswrapper[4910]: I1125 21:32:42.203591 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:42 crc kubenswrapper[4910]: I1125 21:32:42.203673 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:42 crc kubenswrapper[4910]: E1125 21:32:42.203760 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:42 crc kubenswrapper[4910]: E1125 21:32:42.203833 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:42 crc kubenswrapper[4910]: E1125 21:32:42.203939 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:42 crc kubenswrapper[4910]: E1125 21:32:42.204036 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:44 crc kubenswrapper[4910]: I1125 21:32:44.203727 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:44 crc kubenswrapper[4910]: I1125 21:32:44.203889 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:44 crc kubenswrapper[4910]: E1125 21:32:44.204491 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:44 crc kubenswrapper[4910]: I1125 21:32:44.203959 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:44 crc kubenswrapper[4910]: E1125 21:32:44.204572 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:44 crc kubenswrapper[4910]: I1125 21:32:44.203941 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:44 crc kubenswrapper[4910]: E1125 21:32:44.204722 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:44 crc kubenswrapper[4910]: E1125 21:32:44.204772 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:45 crc kubenswrapper[4910]: E1125 21:32:45.170989 4910 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 25 21:32:45 crc kubenswrapper[4910]: E1125 21:32:45.386771 4910 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 21:32:46 crc kubenswrapper[4910]: I1125 21:32:46.203837 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:46 crc kubenswrapper[4910]: I1125 21:32:46.203971 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:46 crc kubenswrapper[4910]: I1125 21:32:46.203905 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:46 crc kubenswrapper[4910]: I1125 21:32:46.203873 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:46 crc kubenswrapper[4910]: E1125 21:32:46.204062 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:46 crc kubenswrapper[4910]: E1125 21:32:46.204144 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:46 crc kubenswrapper[4910]: E1125 21:32:46.204217 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:46 crc kubenswrapper[4910]: E1125 21:32:46.204648 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:46 crc kubenswrapper[4910]: I1125 21:32:46.205000 4910 scope.go:117] "RemoveContainer" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" Nov 25 21:32:46 crc kubenswrapper[4910]: I1125 21:32:46.934638 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/3.log" Nov 25 21:32:46 crc kubenswrapper[4910]: I1125 21:32:46.938871 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerStarted","Data":"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b"} Nov 25 21:32:46 crc kubenswrapper[4910]: I1125 21:32:46.939355 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:32:46 crc kubenswrapper[4910]: I1125 21:32:46.976641 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podStartSLOduration=101.976615083 podStartE2EDuration="1m41.976615083s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:32:46.975137826 +0000 UTC m=+122.437614218" watchObservedRunningTime="2025-11-25 21:32:46.976615083 +0000 UTC m=+122.439091435" Nov 25 21:32:47 crc kubenswrapper[4910]: I1125 21:32:47.016949 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-m4q5p"] Nov 25 21:32:47 crc kubenswrapper[4910]: I1125 21:32:47.017071 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:47 crc kubenswrapper[4910]: E1125 21:32:47.017162 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:48 crc kubenswrapper[4910]: I1125 21:32:48.203376 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:48 crc kubenswrapper[4910]: I1125 21:32:48.203413 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:48 crc kubenswrapper[4910]: I1125 21:32:48.203375 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:48 crc kubenswrapper[4910]: E1125 21:32:48.203541 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:48 crc kubenswrapper[4910]: E1125 21:32:48.203676 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:48 crc kubenswrapper[4910]: E1125 21:32:48.203788 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:49 crc kubenswrapper[4910]: I1125 21:32:49.203117 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:49 crc kubenswrapper[4910]: E1125 21:32:49.203446 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:50 crc kubenswrapper[4910]: I1125 21:32:50.203565 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:50 crc kubenswrapper[4910]: I1125 21:32:50.203674 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:50 crc kubenswrapper[4910]: E1125 21:32:50.203704 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:50 crc kubenswrapper[4910]: I1125 21:32:50.203755 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:50 crc kubenswrapper[4910]: E1125 21:32:50.203899 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:50 crc kubenswrapper[4910]: E1125 21:32:50.203944 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:50 crc kubenswrapper[4910]: E1125 21:32:50.387814 4910 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 21:32:51 crc kubenswrapper[4910]: I1125 21:32:51.203398 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:51 crc kubenswrapper[4910]: E1125 21:32:51.203556 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:52 crc kubenswrapper[4910]: I1125 21:32:52.203815 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:52 crc kubenswrapper[4910]: I1125 21:32:52.203897 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:52 crc kubenswrapper[4910]: I1125 21:32:52.204022 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:52 crc kubenswrapper[4910]: E1125 21:32:52.204439 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:52 crc kubenswrapper[4910]: E1125 21:32:52.204218 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:52 crc kubenswrapper[4910]: E1125 21:32:52.204621 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:53 crc kubenswrapper[4910]: I1125 21:32:53.204071 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:53 crc kubenswrapper[4910]: E1125 21:32:53.204389 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:54 crc kubenswrapper[4910]: I1125 21:32:54.203049 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:54 crc kubenswrapper[4910]: I1125 21:32:54.203078 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:54 crc kubenswrapper[4910]: E1125 21:32:54.203235 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:54 crc kubenswrapper[4910]: I1125 21:32:54.203335 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:54 crc kubenswrapper[4910]: E1125 21:32:54.203385 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:54 crc kubenswrapper[4910]: E1125 21:32:54.203584 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:55 crc kubenswrapper[4910]: I1125 21:32:55.203833 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:55 crc kubenswrapper[4910]: E1125 21:32:55.204874 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:55 crc kubenswrapper[4910]: E1125 21:32:55.389518 4910 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 25 21:32:56 crc kubenswrapper[4910]: I1125 21:32:56.203142 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:56 crc kubenswrapper[4910]: I1125 21:32:56.203372 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:56 crc kubenswrapper[4910]: I1125 21:32:56.203656 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:56 crc kubenswrapper[4910]: E1125 21:32:56.203651 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:56 crc kubenswrapper[4910]: E1125 21:32:56.203795 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:56 crc kubenswrapper[4910]: E1125 21:32:56.203847 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:56 crc kubenswrapper[4910]: I1125 21:32:56.204371 4910 scope.go:117] "RemoveContainer" containerID="1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660" Nov 25 21:32:56 crc kubenswrapper[4910]: I1125 21:32:56.977132 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gqjcx_751fe267-dc17-4de7-81e9-a8caab9e9817/kube-multus/1.log" Nov 25 21:32:56 crc kubenswrapper[4910]: I1125 21:32:56.977176 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gqjcx" event={"ID":"751fe267-dc17-4de7-81e9-a8caab9e9817","Type":"ContainerStarted","Data":"ea56b080cad081ce614b04495b79e924097aaedd91ee98cd8bfb6edb241108dd"} Nov 25 21:32:57 crc kubenswrapper[4910]: I1125 21:32:57.203233 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:57 crc kubenswrapper[4910]: E1125 21:32:57.203560 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:32:58 crc kubenswrapper[4910]: I1125 21:32:58.203659 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:32:58 crc kubenswrapper[4910]: E1125 21:32:58.203823 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:32:58 crc kubenswrapper[4910]: I1125 21:32:58.204050 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:32:58 crc kubenswrapper[4910]: I1125 21:32:58.204087 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:32:58 crc kubenswrapper[4910]: E1125 21:32:58.204151 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:32:58 crc kubenswrapper[4910]: E1125 21:32:58.204375 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:32:59 crc kubenswrapper[4910]: I1125 21:32:59.202980 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:32:59 crc kubenswrapper[4910]: E1125 21:32:59.203590 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-m4q5p" podUID="72d787b6-8fd2-4a83-9e8f-2654fdad81c9" Nov 25 21:33:00 crc kubenswrapper[4910]: I1125 21:33:00.203737 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:33:00 crc kubenswrapper[4910]: I1125 21:33:00.203799 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:33:00 crc kubenswrapper[4910]: E1125 21:33:00.203869 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 25 21:33:00 crc kubenswrapper[4910]: I1125 21:33:00.203744 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:33:00 crc kubenswrapper[4910]: E1125 21:33:00.203941 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 25 21:33:00 crc kubenswrapper[4910]: E1125 21:33:00.203992 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 25 21:33:00 crc kubenswrapper[4910]: I1125 21:33:00.928960 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:33:01 crc kubenswrapper[4910]: I1125 21:33:01.203784 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:33:01 crc kubenswrapper[4910]: I1125 21:33:01.206420 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 25 21:33:01 crc kubenswrapper[4910]: I1125 21:33:01.206428 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 21:33:02 crc kubenswrapper[4910]: I1125 21:33:02.202954 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 25 21:33:02 crc kubenswrapper[4910]: I1125 21:33:02.203021 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:33:02 crc kubenswrapper[4910]: I1125 21:33:02.203085 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 25 21:33:02 crc kubenswrapper[4910]: I1125 21:33:02.204542 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 21:33:02 crc kubenswrapper[4910]: I1125 21:33:02.204746 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 25 21:33:02 crc kubenswrapper[4910]: I1125 21:33:02.204993 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 21:33:02 crc kubenswrapper[4910]: I1125 21:33:02.205899 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 21:33:04 crc kubenswrapper[4910]: I1125 21:33:04.910640 4910 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 25 21:33:04 crc kubenswrapper[4910]: I1125 21:33:04.977338 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k7kph"] Nov 25 21:33:04 crc kubenswrapper[4910]: I1125 21:33:04.978511 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:04 crc kubenswrapper[4910]: I1125 21:33:04.983329 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 21:33:04 crc kubenswrapper[4910]: I1125 21:33:04.988750 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 25 21:33:04 crc kubenswrapper[4910]: I1125 21:33:04.988751 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 21:33:04 crc kubenswrapper[4910]: I1125 21:33:04.988870 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 21:33:04 crc kubenswrapper[4910]: I1125 21:33:04.988952 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 25 21:33:04 crc kubenswrapper[4910]: I1125 21:33:04.989686 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 21:33:04 crc kubenswrapper[4910]: I1125 21:33:04.990352 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.000402 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.003237 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.004627 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.005740 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.031843 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.034073 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.035021 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.037677 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-8gf7r"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.038019 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.038672 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.038991 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.039945 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.041005 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.041120 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.041336 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.041461 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.041568 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.041661 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.041744 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.041862 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.042030 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.042215 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.042357 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.042445 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.042546 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.042606 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.043414 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.043687 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.043996 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-6lgsx"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.046532 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.048526 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.048810 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.049159 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.049558 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.049649 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.049964 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.050526 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.050748 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-4hf2m"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.051120 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-4hf2m" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.051449 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.056285 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.057154 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5ncw9"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.057411 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.057639 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.057168 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.058546 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.058573 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.058667 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.057332 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.057978 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.058096 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.058208 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.058469 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.060012 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.060089 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.060279 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.060424 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.060649 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.060737 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.061063 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.063552 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.063816 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.063974 4910 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jqcq6"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.064967 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.076060 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.076671 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-swtsr"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.077022 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.077192 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.077422 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.077490 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.077676 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.077861 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.078037 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.078095 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.078319 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.078338 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.078447 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.078597 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.078733 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.079157 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.081887 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.082889 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58a13313-0bca-42ed-99e3-1d7898d5458d-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-wkpdc\" (UID: \"58a13313-0bca-42ed-99e3-1d7898d5458d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.082970 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ksdd8\" (UniqueName: \"kubernetes.io/projected/bc0f5871-442b-4fa3-863c-173c2df1ffd4-kube-api-access-ksdd8\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083008 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58a13313-0bca-42ed-99e3-1d7898d5458d-config\") pod \"openshift-apiserver-operator-796bbdcf4f-wkpdc\" (UID: \"58a13313-0bca-42ed-99e3-1d7898d5458d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083044 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-node-pullsecrets\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083277 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d166f48f-7e2e-4c0f-a121-0899af7f81ab-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083302 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-audit\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083328 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d4a0088-93eb-4841-84dd-052dc087ab13-serving-cert\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc 
kubenswrapper[4910]: I1125 21:33:05.083354 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083381 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2z7z\" (UniqueName: \"kubernetes.io/projected/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-kube-api-access-t2z7z\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083406 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bc0f5871-442b-4fa3-863c-173c2df1ffd4-images\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083442 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d166f48f-7e2e-4c0f-a121-0899af7f81ab-serving-cert\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083468 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d166f48f-7e2e-4c0f-a121-0899af7f81ab-audit-dir\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083491 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-image-import-ca\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083519 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d166f48f-7e2e-4c0f-a121-0899af7f81ab-etcd-client\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083547 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-etcd-client\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083571 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/d166f48f-7e2e-4c0f-a121-0899af7f81ab-audit-policies\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083594 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gwdt6\" (UniqueName: \"kubernetes.io/projected/58a13313-0bca-42ed-99e3-1d7898d5458d-kube-api-access-gwdt6\") pod \"openshift-apiserver-operator-796bbdcf4f-wkpdc\" (UID: \"58a13313-0bca-42ed-99e3-1d7898d5458d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083623 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc0f5871-442b-4fa3-863c-173c2df1ffd4-config\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083666 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtwvn\" (UniqueName: \"kubernetes.io/projected/d166f48f-7e2e-4c0f-a121-0899af7f81ab-kube-api-access-mtwvn\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083692 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msldt\" (UniqueName: \"kubernetes.io/projected/8d4a0088-93eb-4841-84dd-052dc087ab13-kube-api-access-msldt\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083719 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d166f48f-7e2e-4c0f-a121-0899af7f81ab-encryption-config\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083753 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-config\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083801 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-client-ca\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083829 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-config\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083869 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-etcd-serving-ca\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083899 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/bc0f5871-442b-4fa3-863c-173c2df1ffd4-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083932 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d166f48f-7e2e-4c0f-a121-0899af7f81ab-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083958 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-serving-cert\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.083986 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-encryption-config\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.084013 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-audit-dir\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.084364 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.084403 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-47x5d"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.089028 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.108174 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.108691 4910 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.109037 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.112379 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-shg9w"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.125016 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-4856l"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.125473 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.128911 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.133322 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.133487 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.133744 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.133917 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.135635 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.135816 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.136258 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.136401 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.136530 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.136661 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.136775 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.136907 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.137040 4910 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.137171 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.138185 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.138298 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.138396 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.138509 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.138634 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.138731 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.138873 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.139135 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.139149 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.139170 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.139180 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.139215 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.139223 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.143528 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.143593 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.144837 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.144977 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 21:33:05 crc 
kubenswrapper[4910]: I1125 21:33:05.145476 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.148237 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.150261 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kdgbm"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.150985 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.151668 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.153959 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.159168 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.162848 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.163169 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.163326 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.163953 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.164431 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.164545 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.164989 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.165598 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.165841 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.165913 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.166113 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.166193 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.166206 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.166232 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.166306 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.166675 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.168147 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.168586 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.168671 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-khq82"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.169460 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.172630 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-wktng"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.173192 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.177987 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.184987 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a90ac3d5-841b-49f1-a6f3-2647f598ab89-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.185036 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/87561ea5-e7c7-4286-aa91-43c6478ff037-auth-proxy-config\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.185059 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-serving-cert\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.185085 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/bc0f5871-442b-4fa3-863c-173c2df1ffd4-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186409 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/87561ea5-e7c7-4286-aa91-43c6478ff037-machine-approver-tls\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186515 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4cf4299b-4d9a-4b11-bf53-34bf106d39ef-available-featuregates\") pod \"openshift-config-operator-7777fb866f-tdzzn\" (UID: \"4cf4299b-4d9a-4b11-bf53-34bf106d39ef\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186554 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186598 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d166f48f-7e2e-4c0f-a121-0899af7f81ab-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186621 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-serving-cert\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186641 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-encryption-config\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186661 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-audit-dir\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186679 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/373d4823-deb1-4353-afb0-fcc3894ecd5a-config\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186698 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87561ea5-e7c7-4286-aa91-43c6478ff037-config\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186718 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/00aa9da1-ce63-4c3f-bbec-4f1c97d85838-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-hs677\" (UID: \"00aa9da1-ce63-4c3f-bbec-4f1c97d85838\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186755 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58a13313-0bca-42ed-99e3-1d7898d5458d-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-wkpdc\" (UID: \"58a13313-0bca-42ed-99e3-1d7898d5458d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186774 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: 
\"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186794 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-serving-cert\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186835 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4cf4299b-4d9a-4b11-bf53-34bf106d39ef-serving-cert\") pod \"openshift-config-operator-7777fb866f-tdzzn\" (UID: \"4cf4299b-4d9a-4b11-bf53-34bf106d39ef\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186855 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/373d4823-deb1-4353-afb0-fcc3894ecd5a-serving-cert\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186892 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ksdd8\" (UniqueName: \"kubernetes.io/projected/bc0f5871-442b-4fa3-863c-173c2df1ffd4-kube-api-access-ksdd8\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186909 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186927 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1fcc231e-278e-466a-97b3-34dc4a705b35-serving-cert\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186952 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58a13313-0bca-42ed-99e3-1d7898d5458d-config\") pod \"openshift-apiserver-operator-796bbdcf4f-wkpdc\" (UID: \"58a13313-0bca-42ed-99e3-1d7898d5458d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.186977 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppx8r\" (UniqueName: \"kubernetes.io/projected/1fcc231e-278e-466a-97b3-34dc4a705b35-kube-api-access-ppx8r\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.187003 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-node-pullsecrets\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.187025 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dn7g5\" (UniqueName: \"kubernetes.io/projected/a90ac3d5-841b-49f1-a6f3-2647f598ab89-kube-api-access-dn7g5\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.187046 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d166f48f-7e2e-4c0f-a121-0899af7f81ab-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.187064 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-config\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.187080 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1fcc231e-278e-466a-97b3-34dc4a705b35-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.187096 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1fcc231e-278e-466a-97b3-34dc4a705b35-service-ca-bundle\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.187113 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.187138 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-audit\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: 
I1125 21:33:05.187158 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a78730f-7e10-472d-b99f-30aaec803f7e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-4fdmp\" (UID: \"6a78730f-7e10-472d-b99f-30aaec803f7e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.187176 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t5l4w\" (UniqueName: \"kubernetes.io/projected/87561ea5-e7c7-4286-aa91-43c6478ff037-kube-api-access-t5l4w\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.194544 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d4a0088-93eb-4841-84dd-052dc087ab13-serving-cert\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.194584 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.194613 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2z7z\" (UniqueName: \"kubernetes.io/projected/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-kube-api-access-t2z7z\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.194872 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bc0f5871-442b-4fa3-863c-173c2df1ffd4-images\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.194892 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hj4lx\" (UniqueName: \"kubernetes.io/projected/373d4823-deb1-4353-afb0-fcc3894ecd5a-kube-api-access-hj4lx\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.194935 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d166f48f-7e2e-4c0f-a121-0899af7f81ab-serving-cert\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.194956 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" 
(UniqueName: \"kubernetes.io/host-path/d166f48f-7e2e-4c0f-a121-0899af7f81ab-audit-dir\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195125 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-image-import-ca\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195161 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-etcd-service-ca\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195202 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7mknb\" (UniqueName: \"kubernetes.io/projected/4cf4299b-4d9a-4b11-bf53-34bf106d39ef-kube-api-access-7mknb\") pod \"openshift-config-operator-7777fb866f-tdzzn\" (UID: \"4cf4299b-4d9a-4b11-bf53-34bf106d39ef\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195390 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d166f48f-7e2e-4c0f-a121-0899af7f81ab-etcd-client\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195423 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a90ac3d5-841b-49f1-a6f3-2647f598ab89-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195447 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-config\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195469 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195614 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ww68h\" (UniqueName: \"kubernetes.io/projected/6a78730f-7e10-472d-b99f-30aaec803f7e-kube-api-access-ww68h\") pod 
\"openshift-controller-manager-operator-756b6f6bc6-4fdmp\" (UID: \"6a78730f-7e10-472d-b99f-30aaec803f7e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195633 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195653 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195798 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-etcd-client\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195821 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a90ac3d5-841b-49f1-a6f3-2647f598ab89-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195850 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-etcd-client\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.195868 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-service-ca\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196004 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpr67\" (UniqueName: \"kubernetes.io/projected/00aa9da1-ce63-4c3f-bbec-4f1c97d85838-kube-api-access-rpr67\") pod \"cluster-samples-operator-665b6dd947-hs677\" (UID: \"00aa9da1-ce63-4c3f-bbec-4f1c97d85838\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196046 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d166f48f-7e2e-4c0f-a121-0899af7f81ab-audit-policies\") pod 
\"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196074 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gwdt6\" (UniqueName: \"kubernetes.io/projected/58a13313-0bca-42ed-99e3-1d7898d5458d-kube-api-access-gwdt6\") pod \"openshift-apiserver-operator-796bbdcf4f-wkpdc\" (UID: \"58a13313-0bca-42ed-99e3-1d7898d5458d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196261 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-etcd-ca\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196292 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-oauth-serving-cert\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196319 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc0f5871-442b-4fa3-863c-173c2df1ffd4-config\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196354 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-dir\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196537 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rcfwh\" (UniqueName: \"kubernetes.io/projected/761b3955-fd92-419a-934c-31f294bbecde-kube-api-access-rcfwh\") pod \"downloads-7954f5f757-4hf2m\" (UID: \"761b3955-fd92-419a-934c-31f294bbecde\") " pod="openshift-console/downloads-7954f5f757-4hf2m" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196562 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196583 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/685f36d8-75f7-4b23-8eba-081657468d03-metrics-tls\") pod \"dns-operator-744455d44c-47x5d\" (UID: \"685f36d8-75f7-4b23-8eba-081657468d03\") " 
pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196599 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-trusted-ca-bundle\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196758 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtwvn\" (UniqueName: \"kubernetes.io/projected/d166f48f-7e2e-4c0f-a121-0899af7f81ab-kube-api-access-mtwvn\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196778 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196798 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-oauth-config\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196816 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-config\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196949 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msldt\" (UniqueName: \"kubernetes.io/projected/8d4a0088-93eb-4841-84dd-052dc087ab13-kube-api-access-msldt\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196969 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-policies\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.196989 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/373d4823-deb1-4353-afb0-fcc3894ecd5a-trusted-ca\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197126 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zmthr\" (UniqueName: \"kubernetes.io/projected/685f36d8-75f7-4b23-8eba-081657468d03-kube-api-access-zmthr\") pod \"dns-operator-744455d44c-47x5d\" (UID: \"685f36d8-75f7-4b23-8eba-081657468d03\") " pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197146 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g4cmj\" (UniqueName: \"kubernetes.io/projected/3e82986a-2957-4450-b122-a47b6d65fd63-kube-api-access-g4cmj\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197171 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d166f48f-7e2e-4c0f-a121-0899af7f81ab-encryption-config\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197191 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197356 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dg9jx\" (UniqueName: \"kubernetes.io/projected/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-kube-api-access-dg9jx\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197377 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-client-ca\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197394 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-flhmw\" (UniqueName: \"kubernetes.io/projected/f63a8e93-5656-4edc-9ee5-24314ebf749a-kube-api-access-flhmw\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197544 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-config\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197728 4910 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-client-ca\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197770 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-config\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197827 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a78730f-7e10-472d-b99f-30aaec803f7e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-4fdmp\" (UID: \"6a78730f-7e10-472d-b99f-30aaec803f7e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.197995 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e82986a-2957-4450-b122-a47b6d65fd63-serving-cert\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.198066 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.198106 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-etcd-serving-ca\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.198227 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nphb\" (UniqueName: \"kubernetes.io/projected/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-kube-api-access-2nphb\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.198296 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.198349 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1fcc231e-278e-466a-97b3-34dc4a705b35-config\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.212409 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.214861 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc0f5871-442b-4fa3-863c-173c2df1ffd4-config\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.217765 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/bc0f5871-442b-4fa3-863c-173c2df1ffd4-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.218518 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.219798 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.221910 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/d166f48f-7e2e-4c0f-a121-0899af7f81ab-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.222479 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.223709 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-etcd-serving-ca\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.224453 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/d166f48f-7e2e-4c0f-a121-0899af7f81ab-audit-policies\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.224497 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58a13313-0bca-42ed-99e3-1d7898d5458d-config\") pod \"openshift-apiserver-operator-796bbdcf4f-wkpdc\" (UID: \"58a13313-0bca-42ed-99e3-1d7898d5458d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.224564 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: 
\"kubernetes.io/host-path/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-node-pullsecrets\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.225967 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/d166f48f-7e2e-4c0f-a121-0899af7f81ab-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.226315 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-config\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.227522 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-audit-dir\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.228464 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/bc0f5871-442b-4fa3-863c-173c2df1ffd4-images\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.229048 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-serving-cert\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.229368 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-audit\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.229733 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-client-ca\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.229776 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/d166f48f-7e2e-4c0f-a121-0899af7f81ab-audit-dir\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.229950 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/d166f48f-7e2e-4c0f-a121-0899af7f81ab-serving-cert\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.231818 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-trusted-ca-bundle\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.232128 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-image-import-ca\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.232350 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-encryption-config\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.232849 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.233744 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/d166f48f-7e2e-4c0f-a121-0899af7f81ab-encryption-config\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.233813 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-config\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.236137 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/d166f48f-7e2e-4c0f-a121-0899af7f81ab-etcd-client\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.237101 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-etcd-client\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.240000 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/58a13313-0bca-42ed-99e3-1d7898d5458d-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-wkpdc\" (UID: \"58a13313-0bca-42ed-99e3-1d7898d5458d\") " 
pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.242936 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.244291 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.245309 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d4a0088-93eb-4841-84dd-052dc087ab13-serving-cert\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.247920 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7bhcw"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.248292 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.252474 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.252674 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.254737 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-cfjc4"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.255094 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.255914 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.256189 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.257581 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.257878 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.258193 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.258225 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-8gf7r"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.258415 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.258568 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-krfkq"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.260128 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.260567 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.261122 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.261343 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.261689 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.263579 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-4hf2m"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.263619 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-6lgsx"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.264819 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k7kph"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.266199 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5ncw9"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.267620 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.268724 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.269745 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.270026 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-78rzv"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.271526 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-z8b2b"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.271778 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.274578 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.274692 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-4856l"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.274771 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-z8b2b" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.274975 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.275907 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-47x5d"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.277032 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.278580 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.279079 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-78rzv"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.280299 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.281083 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.282067 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.282128 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.283070 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-w78cp"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.284856 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-qmbhh"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.285138 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.286261 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-wktng"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.286384 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.286586 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-shg9w"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.287569 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.288864 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-krfkq"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.289845 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.291992 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-swtsr"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.293385 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.294424 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.295491 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kdgbm"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.296586 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jqcq6"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299487 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-oauth-config\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299549 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-config\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299582 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-trusted-ca-bundle\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299624 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299660 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-policies\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299685 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/373d4823-deb1-4353-afb0-fcc3894ecd5a-trusted-ca\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299709 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zmthr\" (UniqueName: \"kubernetes.io/projected/685f36d8-75f7-4b23-8eba-081657468d03-kube-api-access-zmthr\") pod \"dns-operator-744455d44c-47x5d\" (UID: \"685f36d8-75f7-4b23-8eba-081657468d03\") " pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299751 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dg9jx\" (UniqueName: \"kubernetes.io/projected/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-kube-api-access-dg9jx\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299775 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-client-ca\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299806 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g4cmj\" (UniqueName: \"kubernetes.io/projected/3e82986a-2957-4450-b122-a47b6d65fd63-kube-api-access-g4cmj\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299848 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299874 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a78730f-7e10-472d-b99f-30aaec803f7e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-4fdmp\" (UID: \"6a78730f-7e10-472d-b99f-30aaec803f7e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299900 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-flhmw\" (UniqueName: \"kubernetes.io/projected/f63a8e93-5656-4edc-9ee5-24314ebf749a-kube-api-access-flhmw\") pod 
\"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299953 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e82986a-2957-4450-b122-a47b6d65fd63-serving-cert\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.299987 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3cd48358-1086-47f5-aab1-3acea0c01379-config\") pod \"kube-controller-manager-operator-78b949d7b-jwwk4\" (UID: \"3cd48358-1086-47f5-aab1-3acea0c01379\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300044 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1e48d81-784e-4803-8f4c-838f551cf7e0-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-d89ck\" (UID: \"f1e48d81-784e-4803-8f4c-838f551cf7e0\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300082 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300105 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-62t7b\" (UID: \"bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300128 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nphb\" (UniqueName: \"kubernetes.io/projected/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-kube-api-access-2nphb\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300152 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5c80d9d-f8b4-4a0e-9787-818e7b029259-config\") pod \"kube-apiserver-operator-766d6c64bb-8pgqj\" (UID: \"d5c80d9d-f8b4-4a0e-9787-818e7b029259\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300183 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300237 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fcc231e-278e-466a-97b3-34dc4a705b35-config\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300289 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5c80d9d-f8b4-4a0e-9787-818e7b029259-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-8pgqj\" (UID: \"d5c80d9d-f8b4-4a0e-9787-818e7b029259\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300316 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/53f9472e-3cd0-4f90-8320-691acacf6482-profile-collector-cert\") pod \"olm-operator-6b444d44fb-sjcdg\" (UID: \"53f9472e-3cd0-4f90-8320-691acacf6482\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300345 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a90ac3d5-841b-49f1-a6f3-2647f598ab89-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300372 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/87561ea5-e7c7-4286-aa91-43c6478ff037-auth-proxy-config\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300396 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-serving-cert\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300422 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/937d04f6-f3ce-47ef-9c90-bb5aae951969-bound-sa-token\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300475 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4cf4299b-4d9a-4b11-bf53-34bf106d39ef-available-featuregates\") 
pod \"openshift-config-operator-7777fb866f-tdzzn\" (UID: \"4cf4299b-4d9a-4b11-bf53-34bf106d39ef\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300500 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/87561ea5-e7c7-4286-aa91-43c6478ff037-machine-approver-tls\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300545 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300587 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/373d4823-deb1-4353-afb0-fcc3894ecd5a-config\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300663 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87561ea5-e7c7-4286-aa91-43c6478ff037-config\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300712 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/00aa9da1-ce63-4c3f-bbec-4f1c97d85838-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-hs677\" (UID: \"00aa9da1-ce63-4c3f-bbec-4f1c97d85838\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300740 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-serving-cert\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300785 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300810 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4cf4299b-4d9a-4b11-bf53-34bf106d39ef-serving-cert\") pod \"openshift-config-operator-7777fb866f-tdzzn\" (UID: \"4cf4299b-4d9a-4b11-bf53-34bf106d39ef\") " 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300855 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300879 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/373d4823-deb1-4353-afb0-fcc3894ecd5a-serving-cert\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300914 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1fcc231e-278e-466a-97b3-34dc4a705b35-serving-cert\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300942 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3cd48358-1086-47f5-aab1-3acea0c01379-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-jwwk4\" (UID: \"3cd48358-1086-47f5-aab1-3acea0c01379\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.300970 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dxqx9\" (UniqueName: \"kubernetes.io/projected/937d04f6-f3ce-47ef-9c90-bb5aae951969-kube-api-access-dxqx9\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301014 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppx8r\" (UniqueName: \"kubernetes.io/projected/1fcc231e-278e-466a-97b3-34dc4a705b35-kube-api-access-ppx8r\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301028 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301041 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-62t7b\" (UID: \"bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301074 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1e48d81-784e-4803-8f4c-838f551cf7e0-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-d89ck\" (UID: \"f1e48d81-784e-4803-8f4c-838f551cf7e0\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301102 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqjlj\" (UniqueName: \"kubernetes.io/projected/53f9472e-3cd0-4f90-8320-691acacf6482-kube-api-access-pqjlj\") pod \"olm-operator-6b444d44fb-sjcdg\" (UID: \"53f9472e-3cd0-4f90-8320-691acacf6482\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301134 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dn7g5\" (UniqueName: \"kubernetes.io/projected/a90ac3d5-841b-49f1-a6f3-2647f598ab89-kube-api-access-dn7g5\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301159 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1fcc231e-278e-466a-97b3-34dc4a705b35-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301188 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1fcc231e-278e-466a-97b3-34dc4a705b35-service-ca-bundle\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301216 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lpqb\" (UniqueName: \"kubernetes.io/projected/bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b-kube-api-access-8lpqb\") pod \"kube-storage-version-migrator-operator-b67b599dd-62t7b\" (UID: \"bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301275 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-config\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301471 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/d5c80d9d-f8b4-4a0e-9787-818e7b029259-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-8pgqj\" (UID: \"d5c80d9d-f8b4-4a0e-9787-818e7b029259\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301502 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a78730f-7e10-472d-b99f-30aaec803f7e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-4fdmp\" (UID: \"6a78730f-7e10-472d-b99f-30aaec803f7e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301526 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t5l4w\" (UniqueName: \"kubernetes.io/projected/87561ea5-e7c7-4286-aa91-43c6478ff037-kube-api-access-t5l4w\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301572 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-policies\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301596 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301641 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8z8xq\" (UniqueName: \"kubernetes.io/projected/b8eb6262-2c30-4192-8936-9463698c361e-kube-api-access-8z8xq\") pod \"control-plane-machine-set-operator-78cbb6b69f-6ncxd\" (UID: \"b8eb6262-2c30-4192-8936-9463698c361e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301676 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hj4lx\" (UniqueName: \"kubernetes.io/projected/373d4823-deb1-4353-afb0-fcc3894ecd5a-kube-api-access-hj4lx\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301736 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-etcd-service-ca\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301777 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bz9z5\" (UniqueName: 
\"kubernetes.io/projected/c41d5586-c61c-42ec-a6c3-c22bf75f7f1e-kube-api-access-bz9z5\") pod \"multus-admission-controller-857f4d67dd-wktng\" (UID: \"c41d5586-c61c-42ec-a6c3-c22bf75f7f1e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.301941 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-trusted-ca-bundle\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.302090 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.303864 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/373d4823-deb1-4353-afb0-fcc3894ecd5a-trusted-ca\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.306912 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1fcc231e-278e-466a-97b3-34dc4a705b35-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.309366 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-config\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.310023 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4cf4299b-4d9a-4b11-bf53-34bf106d39ef-serving-cert\") pod \"openshift-config-operator-7777fb866f-tdzzn\" (UID: \"4cf4299b-4d9a-4b11-bf53-34bf106d39ef\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.310843 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.311691 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-client-ca\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.311764 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-proxy-ca-bundles\") pod 
\"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.311767 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.312026 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-etcd-service-ca\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.312283 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/00aa9da1-ce63-4c3f-bbec-4f1c97d85838-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-hs677\" (UID: \"00aa9da1-ce63-4c3f-bbec-4f1c97d85838\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.312340 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1fcc231e-278e-466a-97b3-34dc4a705b35-config\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.313395 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/87561ea5-e7c7-4286-aa91-43c6478ff037-auth-proxy-config\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.313910 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-oauth-config\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.314703 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4cf4299b-4d9a-4b11-bf53-34bf106d39ef-available-featuregates\") pod \"openshift-config-operator-7777fb866f-tdzzn\" (UID: \"4cf4299b-4d9a-4b11-bf53-34bf106d39ef\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.302103 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7mknb\" (UniqueName: \"kubernetes.io/projected/4cf4299b-4d9a-4b11-bf53-34bf106d39ef-kube-api-access-7mknb\") pod \"openshift-config-operator-7777fb866f-tdzzn\" (UID: \"4cf4299b-4d9a-4b11-bf53-34bf106d39ef\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 
21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.314856 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e48d81-784e-4803-8f4c-838f551cf7e0-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-d89ck\" (UID: \"f1e48d81-784e-4803-8f4c-838f551cf7e0\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.314860 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a78730f-7e10-472d-b99f-30aaec803f7e-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-4fdmp\" (UID: \"6a78730f-7e10-472d-b99f-30aaec803f7e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.314914 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a90ac3d5-841b-49f1-a6f3-2647f598ab89-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.314936 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-config\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.314958 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315056 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315081 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315112 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ww68h\" (UniqueName: \"kubernetes.io/projected/6a78730f-7e10-472d-b99f-30aaec803f7e-kube-api-access-ww68h\") pod \"openshift-controller-manager-operator-756b6f6bc6-4fdmp\" (UID: \"6a78730f-7e10-472d-b99f-30aaec803f7e\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315134 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-etcd-client\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315155 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c41d5586-c61c-42ec-a6c3-c22bf75f7f1e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-wktng\" (UID: \"c41d5586-c61c-42ec-a6c3-c22bf75f7f1e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315286 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/b8eb6262-2c30-4192-8936-9463698c361e-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-6ncxd\" (UID: \"b8eb6262-2c30-4192-8936-9463698c361e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315314 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a90ac3d5-841b-49f1-a6f3-2647f598ab89-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315342 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-service-ca\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315366 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpr67\" (UniqueName: \"kubernetes.io/projected/00aa9da1-ce63-4c3f-bbec-4f1c97d85838-kube-api-access-rpr67\") pod \"cluster-samples-operator-665b6dd947-hs677\" (UID: \"00aa9da1-ce63-4c3f-bbec-4f1c97d85838\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315461 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-oauth-serving-cert\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315506 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-etcd-ca\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc 
kubenswrapper[4910]: I1125 21:33:05.315529 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/937d04f6-f3ce-47ef-9c90-bb5aae951969-metrics-tls\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315559 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3cd48358-1086-47f5-aab1-3acea0c01379-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-jwwk4\" (UID: \"3cd48358-1086-47f5-aab1-3acea0c01379\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315662 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/937d04f6-f3ce-47ef-9c90-bb5aae951969-trusted-ca\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315683 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/53f9472e-3cd0-4f90-8320-691acacf6482-srv-cert\") pod \"olm-operator-6b444d44fb-sjcdg\" (UID: \"53f9472e-3cd0-4f90-8320-691acacf6482\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315726 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-dir\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315761 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rcfwh\" (UniqueName: \"kubernetes.io/projected/761b3955-fd92-419a-934c-31f294bbecde-kube-api-access-rcfwh\") pod \"downloads-7954f5f757-4hf2m\" (UID: \"761b3955-fd92-419a-934c-31f294bbecde\") " pod="openshift-console/downloads-7954f5f757-4hf2m" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315857 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315881 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/685f36d8-75f7-4b23-8eba-081657468d03-metrics-tls\") pod \"dns-operator-744455d44c-47x5d\" (UID: \"685f36d8-75f7-4b23-8eba-081657468d03\") " pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.317215 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/373d4823-deb1-4353-afb0-fcc3894ecd5a-config\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.317945 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.318341 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.319288 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/87561ea5-e7c7-4286-aa91-43c6478ff037-config\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.320128 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/685f36d8-75f7-4b23-8eba-081657468d03-metrics-tls\") pod \"dns-operator-744455d44c-47x5d\" (UID: \"685f36d8-75f7-4b23-8eba-081657468d03\") " pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.320547 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-serving-cert\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.320602 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1fcc231e-278e-466a-97b3-34dc4a705b35-service-ca-bundle\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.320892 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.321900 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.322681 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: 
\"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-oauth-serving-cert\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.323631 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1fcc231e-278e-466a-97b3-34dc4a705b35-serving-cert\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.323675 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-config\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.324049 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a90ac3d5-841b-49f1-a6f3-2647f598ab89-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.324147 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-etcd-ca\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.324182 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-serving-cert\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.324216 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-dir\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.315753 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a78730f-7e10-472d-b99f-30aaec803f7e-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-4fdmp\" (UID: \"6a78730f-7e10-472d-b99f-30aaec803f7e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.324545 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 
21:33:05.325184 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e82986a-2957-4450-b122-a47b6d65fd63-serving-cert\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.325608 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-service-ca\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.325804 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/373d4823-deb1-4353-afb0-fcc3894ecd5a-serving-cert\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.328179 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-etcd-client\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.328354 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a90ac3d5-841b-49f1-a6f3-2647f598ab89-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.328752 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.328748 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.329438 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.330114 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-khq82"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.330329 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-serving-cert\") pod 
\"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.332840 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/87561ea5-e7c7-4286-aa91-43c6478ff037-machine-approver-tls\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.333130 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.333287 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-w78cp"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.338199 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.340610 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-qmbhh"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.342227 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.342291 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.343351 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-config\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.343684 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.345327 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7bhcw"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.347477 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.349469 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.350647 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-sfx28"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.351409 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-sfx28" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.351928 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-sfx28"] Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.390538 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.401847 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.416799 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3cd48358-1086-47f5-aab1-3acea0c01379-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-jwwk4\" (UID: \"3cd48358-1086-47f5-aab1-3acea0c01379\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.416841 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dxqx9\" (UniqueName: \"kubernetes.io/projected/937d04f6-f3ce-47ef-9c90-bb5aae951969-kube-api-access-dxqx9\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.416880 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-62t7b\" (UID: \"bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.416931 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1e48d81-784e-4803-8f4c-838f551cf7e0-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-d89ck\" (UID: \"f1e48d81-784e-4803-8f4c-838f551cf7e0\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.416954 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqjlj\" (UniqueName: \"kubernetes.io/projected/53f9472e-3cd0-4f90-8320-691acacf6482-kube-api-access-pqjlj\") pod \"olm-operator-6b444d44fb-sjcdg\" (UID: \"53f9472e-3cd0-4f90-8320-691acacf6482\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.416986 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lpqb\" (UniqueName: \"kubernetes.io/projected/bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b-kube-api-access-8lpqb\") pod \"kube-storage-version-migrator-operator-b67b599dd-62t7b\" (UID: \"bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417012 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/d5c80d9d-f8b4-4a0e-9787-818e7b029259-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-8pgqj\" (UID: \"d5c80d9d-f8b4-4a0e-9787-818e7b029259\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417042 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8z8xq\" (UniqueName: \"kubernetes.io/projected/b8eb6262-2c30-4192-8936-9463698c361e-kube-api-access-8z8xq\") pod \"control-plane-machine-set-operator-78cbb6b69f-6ncxd\" (UID: \"b8eb6262-2c30-4192-8936-9463698c361e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417094 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bz9z5\" (UniqueName: \"kubernetes.io/projected/c41d5586-c61c-42ec-a6c3-c22bf75f7f1e-kube-api-access-bz9z5\") pod \"multus-admission-controller-857f4d67dd-wktng\" (UID: \"c41d5586-c61c-42ec-a6c3-c22bf75f7f1e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417146 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e48d81-784e-4803-8f4c-838f551cf7e0-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-d89ck\" (UID: \"f1e48d81-784e-4803-8f4c-838f551cf7e0\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417178 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c41d5586-c61c-42ec-a6c3-c22bf75f7f1e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-wktng\" (UID: \"c41d5586-c61c-42ec-a6c3-c22bf75f7f1e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417211 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/b8eb6262-2c30-4192-8936-9463698c361e-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-6ncxd\" (UID: \"b8eb6262-2c30-4192-8936-9463698c361e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417263 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3cd48358-1086-47f5-aab1-3acea0c01379-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-jwwk4\" (UID: \"3cd48358-1086-47f5-aab1-3acea0c01379\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417306 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/937d04f6-f3ce-47ef-9c90-bb5aae951969-metrics-tls\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417339 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/937d04f6-f3ce-47ef-9c90-bb5aae951969-trusted-ca\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417364 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/53f9472e-3cd0-4f90-8320-691acacf6482-srv-cert\") pod \"olm-operator-6b444d44fb-sjcdg\" (UID: \"53f9472e-3cd0-4f90-8320-691acacf6482\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417932 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3cd48358-1086-47f5-aab1-3acea0c01379-config\") pod \"kube-controller-manager-operator-78b949d7b-jwwk4\" (UID: \"3cd48358-1086-47f5-aab1-3acea0c01379\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.417973 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1e48d81-784e-4803-8f4c-838f551cf7e0-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-d89ck\" (UID: \"f1e48d81-784e-4803-8f4c-838f551cf7e0\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.418006 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-62t7b\" (UID: \"bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.418049 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5c80d9d-f8b4-4a0e-9787-818e7b029259-config\") pod \"kube-apiserver-operator-766d6c64bb-8pgqj\" (UID: \"d5c80d9d-f8b4-4a0e-9787-818e7b029259\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.418088 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/937d04f6-f3ce-47ef-9c90-bb5aae951969-bound-sa-token\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.418116 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5c80d9d-f8b4-4a0e-9787-818e7b029259-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-8pgqj\" (UID: \"d5c80d9d-f8b4-4a0e-9787-818e7b029259\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.418141 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/53f9472e-3cd0-4f90-8320-691acacf6482-profile-collector-cert\") pod \"olm-operator-6b444d44fb-sjcdg\" (UID: \"53f9472e-3cd0-4f90-8320-691acacf6482\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.418474 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/937d04f6-f3ce-47ef-9c90-bb5aae951969-trusted-ca\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.420773 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.421165 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/937d04f6-f3ce-47ef-9c90-bb5aae951969-metrics-tls\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.442667 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.461372 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.480672 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.501099 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.520932 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.541281 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.561988 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.580968 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.594736 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3cd48358-1086-47f5-aab1-3acea0c01379-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-jwwk4\" (UID: \"3cd48358-1086-47f5-aab1-3acea0c01379\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.602756 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.611100 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3cd48358-1086-47f5-aab1-3acea0c01379-config\") pod \"kube-controller-manager-operator-78b949d7b-jwwk4\" (UID: \"3cd48358-1086-47f5-aab1-3acea0c01379\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.622209 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.631772 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d5c80d9d-f8b4-4a0e-9787-818e7b029259-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-8pgqj\" (UID: \"d5c80d9d-f8b4-4a0e-9787-818e7b029259\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.641665 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.661274 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.680004 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.690034 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d5c80d9d-f8b4-4a0e-9787-818e7b029259-config\") pod \"kube-apiserver-operator-766d6c64bb-8pgqj\" (UID: \"d5c80d9d-f8b4-4a0e-9787-818e7b029259\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.701780 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.721179 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.731387 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/b8eb6262-2c30-4192-8936-9463698c361e-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-6ncxd\" (UID: \"b8eb6262-2c30-4192-8936-9463698c361e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.740497 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.750857 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f1e48d81-784e-4803-8f4c-838f551cf7e0-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-d89ck\" (UID: \"f1e48d81-784e-4803-8f4c-838f551cf7e0\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" Nov 25 21:33:05 
crc kubenswrapper[4910]: I1125 21:33:05.761052 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.780922 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.792172 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f1e48d81-784e-4803-8f4c-838f551cf7e0-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-d89ck\" (UID: \"f1e48d81-784e-4803-8f4c-838f551cf7e0\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.801536 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.821122 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.840607 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.861356 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.873144 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-62t7b\" (UID: \"bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.881029 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.888415 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-62t7b\" (UID: \"bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.902418 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.921766 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.943051 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.962459 4910 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.982653 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 21:33:05 crc kubenswrapper[4910]: I1125 21:33:05.994058 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/c41d5586-c61c-42ec-a6c3-c22bf75f7f1e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-wktng\" (UID: \"c41d5586-c61c-42ec-a6c3-c22bf75f7f1e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.031672 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtwvn\" (UniqueName: \"kubernetes.io/projected/d166f48f-7e2e-4c0f-a121-0899af7f81ab-kube-api-access-mtwvn\") pod \"apiserver-7bbb656c7d-tfpf5\" (UID: \"d166f48f-7e2e-4c0f-a121-0899af7f81ab\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.048474 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.050283 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msldt\" (UniqueName: \"kubernetes.io/projected/8d4a0088-93eb-4841-84dd-052dc087ab13-kube-api-access-msldt\") pod \"route-controller-manager-6576b87f9c-2vxgk\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.067827 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2z7z\" (UniqueName: \"kubernetes.io/projected/df48041a-e8d7-49d2-a8a8-159d5ed5ec6d-kube-api-access-t2z7z\") pod \"apiserver-76f77b778f-k7kph\" (UID: \"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d\") " pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.072630 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.087011 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ksdd8\" (UniqueName: \"kubernetes.io/projected/bc0f5871-442b-4fa3-863c-173c2df1ffd4-kube-api-access-ksdd8\") pod \"machine-api-operator-5694c8668f-8gf7r\" (UID: \"bc0f5871-442b-4fa3-863c-173c2df1ffd4\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.101786 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gwdt6\" (UniqueName: \"kubernetes.io/projected/58a13313-0bca-42ed-99e3-1d7898d5458d-kube-api-access-gwdt6\") pod \"openshift-apiserver-operator-796bbdcf4f-wkpdc\" (UID: \"58a13313-0bca-42ed-99e3-1d7898d5458d\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.102429 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.119151 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.122878 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.134596 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/53f9472e-3cd0-4f90-8320-691acacf6482-srv-cert\") pod \"olm-operator-6b444d44fb-sjcdg\" (UID: \"53f9472e-3cd0-4f90-8320-691acacf6482\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.141029 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.154473 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/53f9472e-3cd0-4f90-8320-691acacf6482-profile-collector-cert\") pod \"olm-operator-6b444d44fb-sjcdg\" (UID: \"53f9472e-3cd0-4f90-8320-691acacf6482\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.162738 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.163770 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.185490 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.221970 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.241806 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.252728 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.269400 4910 request.go:700] Waited for 1.01961395s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmco-proxy-tls&limit=500&resourceVersion=0 Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.271932 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.281631 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.316333 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.320309 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.342405 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.362292 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.374344 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5"] Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.382841 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.403763 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.428376 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.441646 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.450431 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk"] Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.461815 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.471848 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc"] Nov 25 21:33:06 crc kubenswrapper[4910]: W1125 21:33:06.472124 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8d4a0088_93eb_4841_84dd_052dc087ab13.slice/crio-551f70deba949e43ab2d5151e4dcf17505b4c033b311b2fb7010183f016ebbcd WatchSource:0}: Error finding container 551f70deba949e43ab2d5151e4dcf17505b4c033b311b2fb7010183f016ebbcd: Status 404 returned error can't find the container with id 
551f70deba949e43ab2d5151e4dcf17505b4c033b311b2fb7010183f016ebbcd Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.482740 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.501997 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.521900 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.523624 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-8gf7r"] Nov 25 21:33:06 crc kubenswrapper[4910]: W1125 21:33:06.534100 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbc0f5871_442b_4fa3_863c_173c2df1ffd4.slice/crio-bd54b0d7ef73eb373f05d9fdbeca0694e17e17bd38cc74bef8116d8310b5e5f4 WatchSource:0}: Error finding container bd54b0d7ef73eb373f05d9fdbeca0694e17e17bd38cc74bef8116d8310b5e5f4: Status 404 returned error can't find the container with id bd54b0d7ef73eb373f05d9fdbeca0694e17e17bd38cc74bef8116d8310b5e5f4 Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.543691 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.561864 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.581217 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.598047 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-k7kph"] Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.602057 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.622901 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.642234 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.662349 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.681237 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.701315 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.722706 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.741890 4910 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.761772 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.781347 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.801204 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.830380 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.841281 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.861745 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.881595 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.902908 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.920441 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.941047 4910 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.963636 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 21:33:06 crc kubenswrapper[4910]: I1125 21:33:06.981942 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.003157 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.022597 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.041922 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.047552 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" event={"ID":"bc0f5871-442b-4fa3-863c-173c2df1ffd4","Type":"ContainerStarted","Data":"0c6ccafae2b7e9d5e93f6eaad77be714a65229218b6128b13cf756428a49c318"} Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.047627 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" event={"ID":"bc0f5871-442b-4fa3-863c-173c2df1ffd4","Type":"ContainerStarted","Data":"bd54b0d7ef73eb373f05d9fdbeca0694e17e17bd38cc74bef8116d8310b5e5f4"} Nov 25 21:33:07 crc 
kubenswrapper[4910]: I1125 21:33:07.051347 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k7kph" event={"ID":"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d","Type":"ContainerStarted","Data":"eb13a36902fb62a2f619e4a81eafdef11bd8009c1b2405c7450d77a707d33845"} Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.054417 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" event={"ID":"8d4a0088-93eb-4841-84dd-052dc087ab13","Type":"ContainerStarted","Data":"deb6d0cbe60bd32f025220468c07c8a20f5526469e621d8e97b52fda76d90e1d"} Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.054493 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" event={"ID":"8d4a0088-93eb-4841-84dd-052dc087ab13","Type":"ContainerStarted","Data":"551f70deba949e43ab2d5151e4dcf17505b4c033b311b2fb7010183f016ebbcd"} Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.054528 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.056664 4910 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-2vxgk container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.056740 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" podUID="8d4a0088-93eb-4841-84dd-052dc087ab13" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.058781 4910 generic.go:334] "Generic (PLEG): container finished" podID="d166f48f-7e2e-4c0f-a121-0899af7f81ab" containerID="beecdc0d91e7759d02e723d6eb249b6ad4fef771ec387e8b8afc35b3eb2da9c4" exitCode=0 Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.058875 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" event={"ID":"d166f48f-7e2e-4c0f-a121-0899af7f81ab","Type":"ContainerDied","Data":"beecdc0d91e7759d02e723d6eb249b6ad4fef771ec387e8b8afc35b3eb2da9c4"} Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.058927 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" event={"ID":"d166f48f-7e2e-4c0f-a121-0899af7f81ab","Type":"ContainerStarted","Data":"2281807b249ac2243fb0fc176650d523591ae2fd4489bb9bbdc75278e26a2d32"} Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.062551 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" event={"ID":"58a13313-0bca-42ed-99e3-1d7898d5458d","Type":"ContainerStarted","Data":"805dbe947ff1ee75d66833ef6390fba297f2a546fadc8dde86cdcb49b02c66c8"} Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.062588 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" 
event={"ID":"58a13313-0bca-42ed-99e3-1d7898d5458d","Type":"ContainerStarted","Data":"a5fccf1bb7224bfef702c6bd5ad1da0eae80d021172363253962157026b8eb38"} Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.098102 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zmthr\" (UniqueName: \"kubernetes.io/projected/685f36d8-75f7-4b23-8eba-081657468d03-kube-api-access-zmthr\") pod \"dns-operator-744455d44c-47x5d\" (UID: \"685f36d8-75f7-4b23-8eba-081657468d03\") " pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.102064 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7mknb\" (UniqueName: \"kubernetes.io/projected/4cf4299b-4d9a-4b11-bf53-34bf106d39ef-kube-api-access-7mknb\") pod \"openshift-config-operator-7777fb866f-tdzzn\" (UID: \"4cf4299b-4d9a-4b11-bf53-34bf106d39ef\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.129784 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dg9jx\" (UniqueName: \"kubernetes.io/projected/dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4-kube-api-access-dg9jx\") pod \"etcd-operator-b45778765-4856l\" (UID: \"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4\") " pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.144953 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.146393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g4cmj\" (UniqueName: \"kubernetes.io/projected/3e82986a-2957-4450-b122-a47b6d65fd63-kube-api-access-g4cmj\") pod \"controller-manager-879f6c89f-swtsr\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.157926 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.159992 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hj4lx\" (UniqueName: \"kubernetes.io/projected/373d4823-deb1-4353-afb0-fcc3894ecd5a-kube-api-access-hj4lx\") pod \"console-operator-58897d9998-6lgsx\" (UID: \"373d4823-deb1-4353-afb0-fcc3894ecd5a\") " pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.164792 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.178820 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a90ac3d5-841b-49f1-a6f3-2647f598ab89-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.179494 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.202819 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t5l4w\" (UniqueName: \"kubernetes.io/projected/87561ea5-e7c7-4286-aa91-43c6478ff037-kube-api-access-t5l4w\") pod \"machine-approver-56656f9798-qptlh\" (UID: \"87561ea5-e7c7-4286-aa91-43c6478ff037\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.228185 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppx8r\" (UniqueName: \"kubernetes.io/projected/1fcc231e-278e-466a-97b3-34dc4a705b35-kube-api-access-ppx8r\") pod \"authentication-operator-69f744f599-5ncw9\" (UID: \"1fcc231e-278e-466a-97b3-34dc4a705b35\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.256016 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dn7g5\" (UniqueName: \"kubernetes.io/projected/a90ac3d5-841b-49f1-a6f3-2647f598ab89-kube-api-access-dn7g5\") pod \"cluster-image-registry-operator-dc59b4c8b-mlrxt\" (UID: \"a90ac3d5-841b-49f1-a6f3-2647f598ab89\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.257533 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-flhmw\" (UniqueName: \"kubernetes.io/projected/f63a8e93-5656-4edc-9ee5-24314ebf749a-kube-api-access-flhmw\") pod \"oauth-openshift-558db77b4-jqcq6\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.281799 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpr67\" (UniqueName: \"kubernetes.io/projected/00aa9da1-ce63-4c3f-bbec-4f1c97d85838-kube-api-access-rpr67\") pod \"cluster-samples-operator-665b6dd947-hs677\" (UID: \"00aa9da1-ce63-4c3f-bbec-4f1c97d85838\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.283239 4910 request.go:700] Waited for 1.962044307s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/serviceaccounts/console/token Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.316165 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nphb\" (UniqueName: \"kubernetes.io/projected/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-kube-api-access-2nphb\") pod \"console-f9d7485db-shg9w\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.323029 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rcfwh\" (UniqueName: \"kubernetes.io/projected/761b3955-fd92-419a-934c-31f294bbecde-kube-api-access-rcfwh\") pod \"downloads-7954f5f757-4hf2m\" (UID: \"761b3955-fd92-419a-934c-31f294bbecde\") " pod="openshift-console/downloads-7954f5f757-4hf2m" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.342934 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 25 21:33:07 crc 
kubenswrapper[4910]: I1125 21:33:07.353515 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ww68h\" (UniqueName: \"kubernetes.io/projected/6a78730f-7e10-472d-b99f-30aaec803f7e-kube-api-access-ww68h\") pod \"openshift-controller-manager-operator-756b6f6bc6-4fdmp\" (UID: \"6a78730f-7e10-472d-b99f-30aaec803f7e\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.361273 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.376855 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.377955 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn"] Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.381328 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.383795 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-4hf2m" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.393758 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:07 crc kubenswrapper[4910]: W1125 21:33:07.398771 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4cf4299b_4d9a_4b11_bf53_34bf106d39ef.slice/crio-5923bae8cdbdba85405205f47f727304ed866820c4f5facf7216cbbbf3282216 WatchSource:0}: Error finding container 5923bae8cdbdba85405205f47f727304ed866820c4f5facf7216cbbbf3282216: Status 404 returned error can't find the container with id 5923bae8cdbdba85405205f47f727304ed866820c4f5facf7216cbbbf3282216 Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.402000 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.405257 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.428663 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.433301 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-47x5d"] Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.442196 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.450904 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.467765 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dxqx9\" (UniqueName: \"kubernetes.io/projected/937d04f6-f3ce-47ef-9c90-bb5aae951969-kube-api-access-dxqx9\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.472177 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.480017 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/f1e48d81-784e-4803-8f4c-838f551cf7e0-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-d89ck\" (UID: \"f1e48d81-784e-4803-8f4c-838f551cf7e0\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" Nov 25 21:33:07 crc kubenswrapper[4910]: W1125 21:33:07.482861 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod685f36d8_75f7_4b23_8eba_081657468d03.slice/crio-0b4c1f50a8ebf45838b85352886a6c0691bbed20066ad95014163524cf03a65d WatchSource:0}: Error finding container 0b4c1f50a8ebf45838b85352886a6c0691bbed20066ad95014163524cf03a65d: Status 404 returned error can't find the container with id 0b4c1f50a8ebf45838b85352886a6c0691bbed20066ad95014163524cf03a65d Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.487191 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.499102 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lpqb\" (UniqueName: \"kubernetes.io/projected/bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b-kube-api-access-8lpqb\") pod \"kube-storage-version-migrator-operator-b67b599dd-62t7b\" (UID: \"bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.520342 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8z8xq\" (UniqueName: \"kubernetes.io/projected/b8eb6262-2c30-4192-8936-9463698c361e-kube-api-access-8z8xq\") pod \"control-plane-machine-set-operator-78cbb6b69f-6ncxd\" (UID: \"b8eb6262-2c30-4192-8936-9463698c361e\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.528591 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.530519 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.557543 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqjlj\" (UniqueName: \"kubernetes.io/projected/53f9472e-3cd0-4f90-8320-691acacf6482-kube-api-access-pqjlj\") pod \"olm-operator-6b444d44fb-sjcdg\" (UID: \"53f9472e-3cd0-4f90-8320-691acacf6482\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.563574 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d5c80d9d-f8b4-4a0e-9787-818e7b029259-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-8pgqj\" (UID: \"d5c80d9d-f8b4-4a0e-9787-818e7b029259\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.578137 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bz9z5\" (UniqueName: \"kubernetes.io/projected/c41d5586-c61c-42ec-a6c3-c22bf75f7f1e-kube-api-access-bz9z5\") pod \"multus-admission-controller-857f4d67dd-wktng\" (UID: \"c41d5586-c61c-42ec-a6c3-c22bf75f7f1e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.611536 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3cd48358-1086-47f5-aab1-3acea0c01379-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-jwwk4\" (UID: \"3cd48358-1086-47f5-aab1-3acea0c01379\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.638762 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677"] Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.639701 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/937d04f6-f3ce-47ef-9c90-bb5aae951969-bound-sa-token\") pod \"ingress-operator-5b745b69d9-t2d5g\" (UID: \"937d04f6-f3ce-47ef-9c90-bb5aae951969\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658176 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3eecf253-d2c2-46ae-97d9-317d07bd346b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-khq82\" (UID: \"3eecf253-d2c2-46ae-97d9-317d07bd346b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658268 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-bound-sa-token\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658324 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658351 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3f10744e-fb73-4689-979b-59c32ba0ae6a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658372 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2e21045d-f1d4-4e65-a7d5-5fd250dba564-proxy-tls\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: \"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658392 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p5cw\" (UniqueName: \"kubernetes.io/projected/2e21045d-f1d4-4e65-a7d5-5fd250dba564-kube-api-access-8p5cw\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: \"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658412 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2e21045d-f1d4-4e65-a7d5-5fd250dba564-images\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: \"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658457 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2e21045d-f1d4-4e65-a7d5-5fd250dba564-auth-proxy-config\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: \"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658477 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7f5z9\" (UniqueName: \"kubernetes.io/projected/3eecf253-d2c2-46ae-97d9-317d07bd346b-kube-api-access-7f5z9\") pod \"machine-config-controller-84d6567774-khq82\" (UID: \"3eecf253-d2c2-46ae-97d9-317d07bd346b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658497 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxpch\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-kube-api-access-xxpch\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658516 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-trusted-ca\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658537 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-tls\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658572 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3f10744e-fb73-4689-979b-59c32ba0ae6a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658607 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-certificates\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.658639 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3eecf253-d2c2-46ae-97d9-317d07bd346b-proxy-tls\") pod \"machine-config-controller-84d6567774-khq82\" (UID: \"3eecf253-d2c2-46ae-97d9-317d07bd346b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" Nov 25 21:33:07 crc kubenswrapper[4910]: E1125 21:33:07.659110 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:08.159089689 +0000 UTC m=+143.621566011 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.669028 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-4hf2m"] Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.715335 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-4856l"] Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.738185 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-swtsr"] Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.761320 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.761881 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxpch\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-kube-api-access-xxpch\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.761912 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ffe31541-2d82-48bf-b8ae-eb586019573e-cert\") pod \"ingress-canary-sfx28\" (UID: \"ffe31541-2d82-48bf-b8ae-eb586019573e\") " pod="openshift-ingress-canary/ingress-canary-sfx28" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.761961 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-trusted-ca\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.761978 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k254\" (UniqueName: \"kubernetes.io/projected/b6e50a82-11e5-4428-a1d7-f43cb9f1a2d8-kube-api-access-2k254\") pod \"migrator-59844c95c7-tbvtg\" (UID: \"b6e50a82-11e5-4428-a1d7-f43cb9f1a2d8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.765587 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/06539de3-ec9c-42dd-b5cb-c23227463dba-stats-auth\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.771500 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06539de3-ec9c-42dd-b5cb-c23227463dba-service-ca-bundle\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: E1125 21:33:07.771812 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:08.271782697 +0000 UTC m=+143.734259009 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.774174 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-tls\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.774234 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-78rzv\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") " pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.774351 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-apiservice-cert\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.774379 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljmjk\" (UniqueName: \"kubernetes.io/projected/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-kube-api-access-ljmjk\") pod \"marketplace-operator-79b997595-78rzv\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") " pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.774424 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-csi-data-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.774501 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: 
\"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-registration-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.775676 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-trusted-ca\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.775713 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/1e473d16-b8d7-4f50-966f-8c3536051b54-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rclxw\" (UID: \"1e473d16-b8d7-4f50-966f-8c3536051b54\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.776208 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3f10744e-fb73-4689-979b-59c32ba0ae6a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.777024 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c52d4\" (UniqueName: \"kubernetes.io/projected/85dbb42a-8a15-46fd-9cf1-5f48bebf8378-kube-api-access-c52d4\") pod \"machine-config-server-z8b2b\" (UID: \"85dbb42a-8a15-46fd-9cf1-5f48bebf8378\") " pod="openshift-machine-config-operator/machine-config-server-z8b2b" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.777666 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/089e4ab9-11b3-4b39-b74c-c61227722e66-metrics-tls\") pod \"dns-default-qmbhh\" (UID: \"089e4ab9-11b3-4b39-b74c-c61227722e66\") " pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.779449 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrd58\" (UniqueName: \"kubernetes.io/projected/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-kube-api-access-xrd58\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.779609 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-certificates\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.780517 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlz2m\" (UniqueName: \"kubernetes.io/projected/fba1f4e8-1272-428d-95eb-7e01208f7b97-kube-api-access-qlz2m\") pod 
\"collect-profiles-29401770-kk6cx\" (UID: \"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.780602 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5drh\" (UniqueName: \"kubernetes.io/projected/ffe31541-2d82-48bf-b8ae-eb586019573e-kube-api-access-w5drh\") pod \"ingress-canary-sfx28\" (UID: \"ffe31541-2d82-48bf-b8ae-eb586019573e\") " pod="openshift-ingress-canary/ingress-canary-sfx28" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.782633 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3eecf253-d2c2-46ae-97d9-317d07bd346b-proxy-tls\") pod \"machine-config-controller-84d6567774-khq82\" (UID: \"3eecf253-d2c2-46ae-97d9-317d07bd346b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.782692 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5jkg\" (UniqueName: \"kubernetes.io/projected/06539de3-ec9c-42dd-b5cb-c23227463dba-kube-api-access-x5jkg\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.783103 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-certificates\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.783713 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd75b2d0-63c6-4106-83fd-f2c15129ce82-config\") pod \"service-ca-operator-777779d784-krfkq\" (UID: \"dd75b2d0-63c6-4106-83fd-f2c15129ce82\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.785242 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-tmpfs\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.785334 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/089e4ab9-11b3-4b39-b74c-c61227722e66-config-volume\") pod \"dns-default-qmbhh\" (UID: \"089e4ab9-11b3-4b39-b74c-c61227722e66\") " pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.785942 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3eecf253-d2c2-46ae-97d9-317d07bd346b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-khq82\" (UID: \"3eecf253-d2c2-46ae-97d9-317d07bd346b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" 
Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.787722 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-tls\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.789067 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/85dbb42a-8a15-46fd-9cf1-5f48bebf8378-node-bootstrap-token\") pod \"machine-config-server-z8b2b\" (UID: \"85dbb42a-8a15-46fd-9cf1-5f48bebf8378\") " pod="openshift-machine-config-operator/machine-config-server-z8b2b" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.789192 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-bound-sa-token\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.789312 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-socket-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.789393 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-plugins-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.789695 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbzhw\" (UniqueName: \"kubernetes.io/projected/07858e25-96fe-4c88-b094-de75bbe5066a-kube-api-access-xbzhw\") pod \"catalog-operator-68c6474976-7lr62\" (UID: \"07858e25-96fe-4c88-b094-de75bbe5066a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.789976 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/85dbb42a-8a15-46fd-9cf1-5f48bebf8378-certs\") pod \"machine-config-server-z8b2b\" (UID: \"85dbb42a-8a15-46fd-9cf1-5f48bebf8378\") " pod="openshift-machine-config-operator/machine-config-server-z8b2b" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.790029 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/380d647c-be46-416a-9152-605a41509c0c-signing-key\") pod \"service-ca-9c57cc56f-7bhcw\" (UID: \"380d647c-be46-416a-9152-605a41509c0c\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.790407 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.790449 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-mountpoint-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.790476 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/06539de3-ec9c-42dd-b5cb-c23227463dba-metrics-certs\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.790672 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3f10744e-fb73-4689-979b-59c32ba0ae6a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.790707 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd75b2d0-63c6-4106-83fd-f2c15129ce82-serving-cert\") pod \"service-ca-operator-777779d784-krfkq\" (UID: \"dd75b2d0-63c6-4106-83fd-f2c15129ce82\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.790748 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/07858e25-96fe-4c88-b094-de75bbe5066a-profile-collector-cert\") pod \"catalog-operator-68c6474976-7lr62\" (UID: \"07858e25-96fe-4c88-b094-de75bbe5066a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.790771 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/06539de3-ec9c-42dd-b5cb-c23227463dba-default-certificate\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.790793 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zctrf\" (UniqueName: \"kubernetes.io/projected/380d647c-be46-416a-9152-605a41509c0c-kube-api-access-zctrf\") pod \"service-ca-9c57cc56f-7bhcw\" (UID: \"380d647c-be46-416a-9152-605a41509c0c\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.791404 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2e21045d-f1d4-4e65-a7d5-5fd250dba564-proxy-tls\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: 
\"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.791435 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p5cw\" (UniqueName: \"kubernetes.io/projected/2e21045d-f1d4-4e65-a7d5-5fd250dba564-kube-api-access-8p5cw\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: \"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.791468 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/380d647c-be46-416a-9152-605a41509c0c-signing-cabundle\") pod \"service-ca-9c57cc56f-7bhcw\" (UID: \"380d647c-be46-416a-9152-605a41509c0c\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.791515 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mb7ns\" (UniqueName: \"kubernetes.io/projected/769629cb-29e2-4d73-8628-c8ee04bd9040-kube-api-access-mb7ns\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.791539 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-78rzv\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") " pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.791602 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2e21045d-f1d4-4e65-a7d5-5fd250dba564-images\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: \"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.791628 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vwg7d\" (UniqueName: \"kubernetes.io/projected/089e4ab9-11b3-4b39-b74c-c61227722e66-kube-api-access-vwg7d\") pod \"dns-default-qmbhh\" (UID: \"089e4ab9-11b3-4b39-b74c-c61227722e66\") " pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.791683 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-webhook-cert\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.791760 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fba1f4e8-1272-428d-95eb-7e01208f7b97-config-volume\") pod \"collect-profiles-29401770-kk6cx\" (UID: \"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.793921 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3eecf253-d2c2-46ae-97d9-317d07bd346b-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-khq82\" (UID: \"3eecf253-d2c2-46ae-97d9-317d07bd346b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.795036 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3f10744e-fb73-4689-979b-59c32ba0ae6a-ca-trust-extracted\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.795951 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7dfg\" (UniqueName: \"kubernetes.io/projected/1e473d16-b8d7-4f50-966f-8c3536051b54-kube-api-access-l7dfg\") pod \"package-server-manager-789f6589d5-rclxw\" (UID: \"1e473d16-b8d7-4f50-966f-8c3536051b54\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.796016 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/07858e25-96fe-4c88-b094-de75bbe5066a-srv-cert\") pod \"catalog-operator-68c6474976-7lr62\" (UID: \"07858e25-96fe-4c88-b094-de75bbe5066a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.796846 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tt662\" (UniqueName: \"kubernetes.io/projected/dd75b2d0-63c6-4106-83fd-f2c15129ce82-kube-api-access-tt662\") pod \"service-ca-operator-777779d784-krfkq\" (UID: \"dd75b2d0-63c6-4106-83fd-f2c15129ce82\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.797148 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fba1f4e8-1272-428d-95eb-7e01208f7b97-secret-volume\") pod \"collect-profiles-29401770-kk6cx\" (UID: \"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:07 crc kubenswrapper[4910]: E1125 21:33:07.799883 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:08.299859054 +0000 UTC m=+143.762335376 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.801461 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2e21045d-f1d4-4e65-a7d5-5fd250dba564-auth-proxy-config\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: \"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.801555 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7f5z9\" (UniqueName: \"kubernetes.io/projected/3eecf253-d2c2-46ae-97d9-317d07bd346b-kube-api-access-7f5z9\") pod \"machine-config-controller-84d6567774-khq82\" (UID: \"3eecf253-d2c2-46ae-97d9-317d07bd346b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.803493 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/2e21045d-f1d4-4e65-a7d5-5fd250dba564-auth-proxy-config\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: \"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.803623 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/2e21045d-f1d4-4e65-a7d5-5fd250dba564-images\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: \"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.804293 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.811693 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.811775 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3f10744e-fb73-4689-979b-59c32ba0ae6a-installation-pull-secrets\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.815669 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.822034 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/2e21045d-f1d4-4e65-a7d5-5fd250dba564-proxy-tls\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: \"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.828016 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxpch\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-kube-api-access-xxpch\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.829221 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3eecf253-d2c2-46ae-97d9-317d07bd346b-proxy-tls\") pod \"machine-config-controller-84d6567774-khq82\" (UID: \"3eecf253-d2c2-46ae-97d9-317d07bd346b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.846783 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.855869 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.856491 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-bound-sa-token\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.880115 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p5cw\" (UniqueName: \"kubernetes.io/projected/2e21045d-f1d4-4e65-a7d5-5fd250dba564-kube-api-access-8p5cw\") pod \"machine-config-operator-74547568cd-ldfjq\" (UID: \"2e21045d-f1d4-4e65-a7d5-5fd250dba564\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.898902 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7f5z9\" (UniqueName: \"kubernetes.io/projected/3eecf253-d2c2-46ae-97d9-317d07bd346b-kube-api-access-7f5z9\") pod \"machine-config-controller-84d6567774-khq82\" (UID: \"3eecf253-d2c2-46ae-97d9-317d07bd346b\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902372 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:07 crc kubenswrapper[4910]: E1125 21:33:07.902654 4910 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:08.402620586 +0000 UTC m=+143.865096908 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902734 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/380d647c-be46-416a-9152-605a41509c0c-signing-cabundle\") pod \"service-ca-9c57cc56f-7bhcw\" (UID: \"380d647c-be46-416a-9152-605a41509c0c\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902765 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mb7ns\" (UniqueName: \"kubernetes.io/projected/769629cb-29e2-4d73-8628-c8ee04bd9040-kube-api-access-mb7ns\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902786 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-78rzv\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") " pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902813 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vwg7d\" (UniqueName: \"kubernetes.io/projected/089e4ab9-11b3-4b39-b74c-c61227722e66-kube-api-access-vwg7d\") pod \"dns-default-qmbhh\" (UID: \"089e4ab9-11b3-4b39-b74c-c61227722e66\") " pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902840 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-webhook-cert\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902861 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fba1f4e8-1272-428d-95eb-7e01208f7b97-config-volume\") pod \"collect-profiles-29401770-kk6cx\" (UID: \"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902883 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7dfg\" (UniqueName: \"kubernetes.io/projected/1e473d16-b8d7-4f50-966f-8c3536051b54-kube-api-access-l7dfg\") pod \"package-server-manager-789f6589d5-rclxw\" 
(UID: \"1e473d16-b8d7-4f50-966f-8c3536051b54\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902928 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/07858e25-96fe-4c88-b094-de75bbe5066a-srv-cert\") pod \"catalog-operator-68c6474976-7lr62\" (UID: \"07858e25-96fe-4c88-b094-de75bbe5066a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902953 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tt662\" (UniqueName: \"kubernetes.io/projected/dd75b2d0-63c6-4106-83fd-f2c15129ce82-kube-api-access-tt662\") pod \"service-ca-operator-777779d784-krfkq\" (UID: \"dd75b2d0-63c6-4106-83fd-f2c15129ce82\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902979 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fba1f4e8-1272-428d-95eb-7e01208f7b97-secret-volume\") pod \"collect-profiles-29401770-kk6cx\" (UID: \"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.902998 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ffe31541-2d82-48bf-b8ae-eb586019573e-cert\") pod \"ingress-canary-sfx28\" (UID: \"ffe31541-2d82-48bf-b8ae-eb586019573e\") " pod="openshift-ingress-canary/ingress-canary-sfx28" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903017 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k254\" (UniqueName: \"kubernetes.io/projected/b6e50a82-11e5-4428-a1d7-f43cb9f1a2d8-kube-api-access-2k254\") pod \"migrator-59844c95c7-tbvtg\" (UID: \"b6e50a82-11e5-4428-a1d7-f43cb9f1a2d8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903034 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/06539de3-ec9c-42dd-b5cb-c23227463dba-stats-auth\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903058 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06539de3-ec9c-42dd-b5cb-c23227463dba-service-ca-bundle\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903078 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-78rzv\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") " pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903095 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-apiservice-cert\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903110 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljmjk\" (UniqueName: \"kubernetes.io/projected/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-kube-api-access-ljmjk\") pod \"marketplace-operator-79b997595-78rzv\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") " pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903128 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-csi-data-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903145 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-registration-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903162 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/1e473d16-b8d7-4f50-966f-8c3536051b54-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rclxw\" (UID: \"1e473d16-b8d7-4f50-966f-8c3536051b54\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903224 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c52d4\" (UniqueName: \"kubernetes.io/projected/85dbb42a-8a15-46fd-9cf1-5f48bebf8378-kube-api-access-c52d4\") pod \"machine-config-server-z8b2b\" (UID: \"85dbb42a-8a15-46fd-9cf1-5f48bebf8378\") " pod="openshift-machine-config-operator/machine-config-server-z8b2b" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903263 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/089e4ab9-11b3-4b39-b74c-c61227722e66-metrics-tls\") pod \"dns-default-qmbhh\" (UID: \"089e4ab9-11b3-4b39-b74c-c61227722e66\") " pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903295 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrd58\" (UniqueName: \"kubernetes.io/projected/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-kube-api-access-xrd58\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903320 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlz2m\" (UniqueName: \"kubernetes.io/projected/fba1f4e8-1272-428d-95eb-7e01208f7b97-kube-api-access-qlz2m\") pod \"collect-profiles-29401770-kk6cx\" (UID: 
\"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903342 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5drh\" (UniqueName: \"kubernetes.io/projected/ffe31541-2d82-48bf-b8ae-eb586019573e-kube-api-access-w5drh\") pod \"ingress-canary-sfx28\" (UID: \"ffe31541-2d82-48bf-b8ae-eb586019573e\") " pod="openshift-ingress-canary/ingress-canary-sfx28" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903366 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5jkg\" (UniqueName: \"kubernetes.io/projected/06539de3-ec9c-42dd-b5cb-c23227463dba-kube-api-access-x5jkg\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903386 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd75b2d0-63c6-4106-83fd-f2c15129ce82-config\") pod \"service-ca-operator-777779d784-krfkq\" (UID: \"dd75b2d0-63c6-4106-83fd-f2c15129ce82\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903406 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-tmpfs\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903422 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/089e4ab9-11b3-4b39-b74c-c61227722e66-config-volume\") pod \"dns-default-qmbhh\" (UID: \"089e4ab9-11b3-4b39-b74c-c61227722e66\") " pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903445 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/85dbb42a-8a15-46fd-9cf1-5f48bebf8378-node-bootstrap-token\") pod \"machine-config-server-z8b2b\" (UID: \"85dbb42a-8a15-46fd-9cf1-5f48bebf8378\") " pod="openshift-machine-config-operator/machine-config-server-z8b2b" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903464 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-socket-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903489 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-plugins-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903506 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbzhw\" (UniqueName: 
\"kubernetes.io/projected/07858e25-96fe-4c88-b094-de75bbe5066a-kube-api-access-xbzhw\") pod \"catalog-operator-68c6474976-7lr62\" (UID: \"07858e25-96fe-4c88-b094-de75bbe5066a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903523 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/85dbb42a-8a15-46fd-9cf1-5f48bebf8378-certs\") pod \"machine-config-server-z8b2b\" (UID: \"85dbb42a-8a15-46fd-9cf1-5f48bebf8378\") " pod="openshift-machine-config-operator/machine-config-server-z8b2b" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903539 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/380d647c-be46-416a-9152-605a41509c0c-signing-key\") pod \"service-ca-9c57cc56f-7bhcw\" (UID: \"380d647c-be46-416a-9152-605a41509c0c\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903565 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903685 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-mountpoint-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903704 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/06539de3-ec9c-42dd-b5cb-c23227463dba-metrics-certs\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903735 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd75b2d0-63c6-4106-83fd-f2c15129ce82-serving-cert\") pod \"service-ca-operator-777779d784-krfkq\" (UID: \"dd75b2d0-63c6-4106-83fd-f2c15129ce82\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903752 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/07858e25-96fe-4c88-b094-de75bbe5066a-profile-collector-cert\") pod \"catalog-operator-68c6474976-7lr62\" (UID: \"07858e25-96fe-4c88-b094-de75bbe5066a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903770 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/06539de3-ec9c-42dd-b5cb-c23227463dba-default-certificate\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 
21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.903789 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zctrf\" (UniqueName: \"kubernetes.io/projected/380d647c-be46-416a-9152-605a41509c0c-kube-api-access-zctrf\") pod \"service-ca-9c57cc56f-7bhcw\" (UID: \"380d647c-be46-416a-9152-605a41509c0c\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.905485 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/380d647c-be46-416a-9152-605a41509c0c-signing-cabundle\") pod \"service-ca-9c57cc56f-7bhcw\" (UID: \"380d647c-be46-416a-9152-605a41509c0c\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.906280 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-plugins-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.906381 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-mountpoint-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.908136 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-78rzv\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") " pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.909566 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd75b2d0-63c6-4106-83fd-f2c15129ce82-config\") pod \"service-ca-operator-777779d784-krfkq\" (UID: \"dd75b2d0-63c6-4106-83fd-f2c15129ce82\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.910054 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-tmpfs\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.911121 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.912461 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/089e4ab9-11b3-4b39-b74c-c61227722e66-config-volume\") pod \"dns-default-qmbhh\" (UID: \"089e4ab9-11b3-4b39-b74c-c61227722e66\") " pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:07 crc kubenswrapper[4910]: E1125 21:33:07.914235 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:08.414212173 +0000 UTC m=+143.876688495 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.915753 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fba1f4e8-1272-428d-95eb-7e01208f7b97-config-volume\") pod \"collect-profiles-29401770-kk6cx\" (UID: \"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.915838 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-socket-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.918954 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/089e4ab9-11b3-4b39-b74c-c61227722e66-metrics-tls\") pod \"dns-default-qmbhh\" (UID: \"089e4ab9-11b3-4b39-b74c-c61227722e66\") " pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.920258 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/06539de3-ec9c-42dd-b5cb-c23227463dba-service-ca-bundle\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.927786 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-csi-data-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.927865 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/769629cb-29e2-4d73-8628-c8ee04bd9040-registration-dir\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " 
pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.930557 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd75b2d0-63c6-4106-83fd-f2c15129ce82-serving-cert\") pod \"service-ca-operator-777779d784-krfkq\" (UID: \"dd75b2d0-63c6-4106-83fd-f2c15129ce82\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.933544 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/07858e25-96fe-4c88-b094-de75bbe5066a-profile-collector-cert\") pod \"catalog-operator-68c6474976-7lr62\" (UID: \"07858e25-96fe-4c88-b094-de75bbe5066a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.950002 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-webhook-cert\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.951609 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/06539de3-ec9c-42dd-b5cb-c23227463dba-default-certificate\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.952558 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/380d647c-be46-416a-9152-605a41509c0c-signing-key\") pod \"service-ca-9c57cc56f-7bhcw\" (UID: \"380d647c-be46-416a-9152-605a41509c0c\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.952941 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/06539de3-ec9c-42dd-b5cb-c23227463dba-metrics-certs\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.953831 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/85dbb42a-8a15-46fd-9cf1-5f48bebf8378-node-bootstrap-token\") pod \"machine-config-server-z8b2b\" (UID: \"85dbb42a-8a15-46fd-9cf1-5f48bebf8378\") " pod="openshift-machine-config-operator/machine-config-server-z8b2b" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.954342 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/07858e25-96fe-4c88-b094-de75bbe5066a-srv-cert\") pod \"catalog-operator-68c6474976-7lr62\" (UID: \"07858e25-96fe-4c88-b094-de75bbe5066a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.954341 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fba1f4e8-1272-428d-95eb-7e01208f7b97-secret-volume\") pod 
\"collect-profiles-29401770-kk6cx\" (UID: \"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.955672 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/1e473d16-b8d7-4f50-966f-8c3536051b54-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rclxw\" (UID: \"1e473d16-b8d7-4f50-966f-8c3536051b54\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.957213 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-78rzv\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") " pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.963521 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/06539de3-ec9c-42dd-b5cb-c23227463dba-stats-auth\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.976377 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zctrf\" (UniqueName: \"kubernetes.io/projected/380d647c-be46-416a-9152-605a41509c0c-kube-api-access-zctrf\") pod \"service-ca-9c57cc56f-7bhcw\" (UID: \"380d647c-be46-416a-9152-605a41509c0c\") " pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.982047 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5ncw9"] Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.983154 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mb7ns\" (UniqueName: \"kubernetes.io/projected/769629cb-29e2-4d73-8628-c8ee04bd9040-kube-api-access-mb7ns\") pod \"csi-hostpathplugin-w78cp\" (UID: \"769629cb-29e2-4d73-8628-c8ee04bd9040\") " pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.989429 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ffe31541-2d82-48bf-b8ae-eb586019573e-cert\") pod \"ingress-canary-sfx28\" (UID: \"ffe31541-2d82-48bf-b8ae-eb586019573e\") " pod="openshift-ingress-canary/ingress-canary-sfx28" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.990171 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-apiservice-cert\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:07 crc kubenswrapper[4910]: I1125 21:33:07.996771 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/85dbb42a-8a15-46fd-9cf1-5f48bebf8378-certs\") pod \"machine-config-server-z8b2b\" (UID: \"85dbb42a-8a15-46fd-9cf1-5f48bebf8378\") " 
pod="openshift-machine-config-operator/machine-config-server-z8b2b" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.005620 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.006623 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:08.506598235 +0000 UTC m=+143.969074557 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.008917 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlz2m\" (UniqueName: \"kubernetes.io/projected/fba1f4e8-1272-428d-95eb-7e01208f7b97-kube-api-access-qlz2m\") pod \"collect-profiles-29401770-kk6cx\" (UID: \"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.012487 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrd58\" (UniqueName: \"kubernetes.io/projected/ae41dbb9-e2c8-4fae-b739-534ec0e520d5-kube-api-access-xrd58\") pod \"packageserver-d55dfcdfc-6nn9f\" (UID: \"ae41dbb9-e2c8-4fae-b739-534ec0e520d5\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.031707 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w5drh\" (UniqueName: \"kubernetes.io/projected/ffe31541-2d82-48bf-b8ae-eb586019573e-kube-api-access-w5drh\") pod \"ingress-canary-sfx28\" (UID: \"ffe31541-2d82-48bf-b8ae-eb586019573e\") " pod="openshift-ingress-canary/ingress-canary-sfx28" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.045917 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbzhw\" (UniqueName: \"kubernetes.io/projected/07858e25-96fe-4c88-b094-de75bbe5066a-kube-api-access-xbzhw\") pod \"catalog-operator-68c6474976-7lr62\" (UID: \"07858e25-96fe-4c88-b094-de75bbe5066a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.047682 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-6lgsx"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.066409 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5jkg\" (UniqueName: \"kubernetes.io/projected/06539de3-ec9c-42dd-b5cb-c23227463dba-kube-api-access-x5jkg\") pod \"router-default-5444994796-cfjc4\" (UID: \"06539de3-ec9c-42dd-b5cb-c23227463dba\") " 
pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.085968 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" event={"ID":"00aa9da1-ce63-4c3f-bbec-4f1c97d85838","Type":"ContainerStarted","Data":"f7ef47ab3d963978f041edcd16723445faa1d111b4597a910de6d564c86740c2"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.086088 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c52d4\" (UniqueName: \"kubernetes.io/projected/85dbb42a-8a15-46fd-9cf1-5f48bebf8378-kube-api-access-c52d4\") pod \"machine-config-server-z8b2b\" (UID: \"85dbb42a-8a15-46fd-9cf1-5f48bebf8378\") " pod="openshift-machine-config-operator/machine-config-server-z8b2b" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.087068 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" event={"ID":"3e82986a-2957-4450-b122-a47b6d65fd63","Type":"ContainerStarted","Data":"b13921a5deefcf00dd84c2beafe87dc5871139d67a6e7d32148714a35309d716"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.087747 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" event={"ID":"87561ea5-e7c7-4286-aa91-43c6478ff037","Type":"ContainerStarted","Data":"aa9ca178903ff8e500a48533d5c39819908af651635895859e51e7e0f25061fe"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.088884 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" event={"ID":"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4","Type":"ContainerStarted","Data":"4aa8faa98b6e9f52d04ec945f9894394b9bcdf7c8493e38b6bd5af5c7fea4743"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.092603 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-4hf2m" event={"ID":"761b3955-fd92-419a-934c-31f294bbecde","Type":"ContainerStarted","Data":"1fd82c44c5ca920dfc99328f0081af2d632315ed44c1bb46ee59be0a08e33f2b"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.105921 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljmjk\" (UniqueName: \"kubernetes.io/projected/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-kube-api-access-ljmjk\") pod \"marketplace-operator-79b997595-78rzv\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") " pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.113206 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.114114 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:08.614096803 +0000 UTC m=+144.076573125 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.131653 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7dfg\" (UniqueName: \"kubernetes.io/projected/1e473d16-b8d7-4f50-966f-8c3536051b54-kube-api-access-l7dfg\") pod \"package-server-manager-789f6589d5-rclxw\" (UID: \"1e473d16-b8d7-4f50-966f-8c3536051b54\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.141759 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.142577 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.149851 4910 generic.go:334] "Generic (PLEG): container finished" podID="df48041a-e8d7-49d2-a8a8-159d5ed5ec6d" containerID="916ea643188f59bffc800609f3eca4c32ef4ee90831c96923f127461ccfa6633" exitCode=0 Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.150508 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k7kph" event={"ID":"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d","Type":"ContainerDied","Data":"916ea643188f59bffc800609f3eca4c32ef4ee90831c96923f127461ccfa6633"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.162826 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.164903 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k254\" (UniqueName: \"kubernetes.io/projected/b6e50a82-11e5-4428-a1d7-f43cb9f1a2d8-kube-api-access-2k254\") pod \"migrator-59844c95c7-tbvtg\" (UID: \"b6e50a82-11e5-4428-a1d7-f43cb9f1a2d8\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.182823 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.183082 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.185699 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.197963 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.200740 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.204405 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vwg7d\" (UniqueName: \"kubernetes.io/projected/089e4ab9-11b3-4b39-b74c-c61227722e66-kube-api-access-vwg7d\") pod \"dns-default-qmbhh\" (UID: \"089e4ab9-11b3-4b39-b74c-c61227722e66\") " pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.211423 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jqcq6"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.213480 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.214174 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.214905 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:08.714845192 +0000 UTC m=+144.177321514 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.216200 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.216857 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:08.716841974 +0000 UTC m=+144.179318296 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.237271 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-z8b2b" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.238055 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.238295 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.243156 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tt662\" (UniqueName: \"kubernetes.io/projected/dd75b2d0-63c6-4106-83fd-f2c15129ce82-kube-api-access-tt662\") pod \"service-ca-operator-777779d784-krfkq\" (UID: \"dd75b2d0-63c6-4106-83fd-f2c15129ce82\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.274568 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.276828 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-w78cp" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.279015 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-sfx28" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.289367 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" event={"ID":"1fcc231e-278e-466a-97b3-34dc4a705b35","Type":"ContainerStarted","Data":"4e20d5c646c5b1f31d690cae5c21e629d64858bfa7fdcee8881851070b328a07"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.295260 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.300800 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt"] Nov 25 21:33:08 crc kubenswrapper[4910]: W1125 21:33:08.312068 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf63a8e93_5656_4edc_9ee5_24314ebf749a.slice/crio-3175deeb69168b355e99e1a1eb2b5077f65427ae1595a0fa5277d6f8c7056a8a WatchSource:0}: Error finding container 3175deeb69168b355e99e1a1eb2b5077f65427ae1595a0fa5277d6f8c7056a8a: Status 404 returned error can't find the container with id 3175deeb69168b355e99e1a1eb2b5077f65427ae1595a0fa5277d6f8c7056a8a Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.317796 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.319754 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-11-25 21:33:08.819723499 +0000 UTC m=+144.282199821 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.338423 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-shg9w"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.373524 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.392351 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf4299b-4d9a-4b11-bf53-34bf106d39ef" containerID="8248cc02e9c8caa276009696c27ad97b8692cc16abddb3c82dbea1e700b94fdb" exitCode=0 Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.392533 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" event={"ID":"4cf4299b-4d9a-4b11-bf53-34bf106d39ef","Type":"ContainerDied","Data":"8248cc02e9c8caa276009696c27ad97b8692cc16abddb3c82dbea1e700b94fdb"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.392624 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" event={"ID":"4cf4299b-4d9a-4b11-bf53-34bf106d39ef","Type":"ContainerStarted","Data":"5923bae8cdbdba85405205f47f727304ed866820c4f5facf7216cbbbf3282216"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.396799 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.398193 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" event={"ID":"685f36d8-75f7-4b23-8eba-081657468d03","Type":"ContainerStarted","Data":"0b4c1f50a8ebf45838b85352886a6c0691bbed20066ad95014163524cf03a65d"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.428607 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.429144 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:08.929126745 +0000 UTC m=+144.391603067 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.437461 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" event={"ID":"bc0f5871-442b-4fa3-863c-173c2df1ffd4","Type":"ContainerStarted","Data":"e97dfeb63fe2a638fc1b002947e930574b34364c6ce3ccd59ff3bab4c29b18b8"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.443413 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" event={"ID":"d166f48f-7e2e-4c0f-a121-0899af7f81ab","Type":"ContainerStarted","Data":"da430778a1f582697c70929081046cbbfa24dd499df693d7645c11dbcfc772a7"} Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.465901 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.511938 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" Nov 25 21:33:08 crc kubenswrapper[4910]: W1125 21:33:08.515302 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda90ac3d5_841b_49f1_a6f3_2647f598ab89.slice/crio-07192fbfaf14e3d5f8a04a5873756bc5f0b9b7e22337c45f3403bb05f279c07d WatchSource:0}: Error finding container 07192fbfaf14e3d5f8a04a5873756bc5f0b9b7e22337c45f3403bb05f279c07d: Status 404 returned error can't find the container with id 07192fbfaf14e3d5f8a04a5873756bc5f0b9b7e22337c45f3403bb05f279c07d Nov 25 21:33:08 crc kubenswrapper[4910]: W1125 21:33:08.518563 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf1e48d81_784e_4803_8f4c_838f551cf7e0.slice/crio-c44fb0bd960b34a0ef4f716f8eb28b82a8dc324e5b4598772660d68ec5b50d53 WatchSource:0}: Error finding container c44fb0bd960b34a0ef4f716f8eb28b82a8dc324e5b4598772660d68ec5b50d53: Status 404 returned error can't find the container with id c44fb0bd960b34a0ef4f716f8eb28b82a8dc324e5b4598772660d68ec5b50d53 Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.535924 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.540597 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.040563524 +0000 UTC m=+144.503039846 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: W1125 21:33:08.542133 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8eb6262_2c30_4192_8936_9463698c361e.slice/crio-09abc3163f99df76a941aceee19b57582150e61e9c6e5b9691dd16c61b4ff86a WatchSource:0}: Error finding container 09abc3163f99df76a941aceee19b57582150e61e9c6e5b9691dd16c61b4ff86a: Status 404 returned error can't find the container with id 09abc3163f99df76a941aceee19b57582150e61e9c6e5b9691dd16c61b4ff86a Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.596516 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.637330 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.642932 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.645962 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.145947777 +0000 UTC m=+144.608424099 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.675396 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.687757 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.698941 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-wktng"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.751696 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.752127 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.252108723 +0000 UTC m=+144.714585045 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.760980 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-7bhcw"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.814478 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq"] Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.855227 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.855827 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.355800334 +0000 UTC m=+144.818276656 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: W1125 21:33:08.919833 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc41d5586_c61c_42ec_a6c3_c22bf75f7f1e.slice/crio-e33282cb346ff5f8c28829a0dffa1af1b20a2ca334b0f907601ca62070517ea4 WatchSource:0}: Error finding container e33282cb346ff5f8c28829a0dffa1af1b20a2ca334b0f907601ca62070517ea4: Status 404 returned error can't find the container with id e33282cb346ff5f8c28829a0dffa1af1b20a2ca334b0f907601ca62070517ea4 Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.956972 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.957457 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.45743229 +0000 UTC m=+144.919908612 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.957821 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:08 crc kubenswrapper[4910]: E1125 21:33:08.958468 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.458455182 +0000 UTC m=+144.920931504 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:08 crc kubenswrapper[4910]: I1125 21:33:08.986523 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" podStartSLOduration=122.986493067 podStartE2EDuration="2m2.986493067s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:08.983805594 +0000 UTC m=+144.446281916" watchObservedRunningTime="2025-11-25 21:33:08.986493067 +0000 UTC m=+144.448969389" Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.047983 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw"] Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.064652 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:09 crc kubenswrapper[4910]: E1125 21:33:09.065817 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.565791045 +0000 UTC m=+145.028267367 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.076975 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:09 crc kubenswrapper[4910]: E1125 21:33:09.080718 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.580687495 +0000 UTC m=+145.043163817 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.179790 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:09 crc kubenswrapper[4910]: E1125 21:33:09.180122 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.680099213 +0000 UTC m=+145.142575535 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.192693 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" podStartSLOduration=123.192672011 podStartE2EDuration="2m3.192672011s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:09.188145541 +0000 UTC m=+144.650621863" watchObservedRunningTime="2025-11-25 21:33:09.192672011 +0000 UTC m=+144.655148333" Nov 25 21:33:09 crc kubenswrapper[4910]: E1125 21:33:09.280823 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.780810861 +0000 UTC m=+145.243287183 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.281189 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.381989 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-wkpdc" podStartSLOduration=124.381971433 podStartE2EDuration="2m4.381971433s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:09.374695649 +0000 UTC m=+144.837171971" watchObservedRunningTime="2025-11-25 21:33:09.381971433 +0000 UTC m=+144.844447755" Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.389777 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:09 crc kubenswrapper[4910]: E1125 21:33:09.390772 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.890728554 +0000 UTC m=+145.353204876 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.414925 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-w78cp"] Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.493931 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:09 crc kubenswrapper[4910]: E1125 21:33:09.494399 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:09.994387053 +0000 UTC m=+145.456863375 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.527507 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" event={"ID":"d5c80d9d-f8b4-4a0e-9787-818e7b029259","Type":"ContainerStarted","Data":"5d8e6ad76f8896becd621f2c044e5af675c4b83b23e623315751ea673463d227"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.547769 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg"] Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.593634 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-sfx28"] Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.597877 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:09 crc kubenswrapper[4910]: E1125 21:33:09.598514 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:10.098490326 +0000 UTC m=+145.560966648 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.636458 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-cfjc4" event={"ID":"06539de3-ec9c-42dd-b5cb-c23227463dba","Type":"ContainerStarted","Data":"db833864df2998b33c5532530fc42e939f1cc67040049e607f18096cb3d980d6"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.652453 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" event={"ID":"f1e48d81-784e-4803-8f4c-838f551cf7e0","Type":"ContainerStarted","Data":"c44fb0bd960b34a0ef4f716f8eb28b82a8dc324e5b4598772660d68ec5b50d53"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.662805 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f"] Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.701639 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-8gf7r" podStartSLOduration=123.701604238 podStartE2EDuration="2m3.701604238s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:09.657106745 +0000 UTC m=+145.119583067" watchObservedRunningTime="2025-11-25 21:33:09.701604238 +0000 UTC m=+145.164080550" Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.721773 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx"] Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.723817 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" event={"ID":"dd8a83d7-0f5a-4a6b-b5b3-ff71d83debe4","Type":"ContainerStarted","Data":"21135c7a3d91be91acdcdb68b1fdc97061239fb192110eee1aaca30482160e8d"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.742771 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:09 crc kubenswrapper[4910]: E1125 21:33:09.743626 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:10.243612605 +0000 UTC m=+145.706088927 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.751396 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-4hf2m" event={"ID":"761b3955-fd92-419a-934c-31f294bbecde","Type":"ContainerStarted","Data":"2c8b4489f8980a6a0d12cdf62828459691eb560e098e47ce3725d8abb65e2990"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.751951 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-4hf2m" Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.773869 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-78rzv"] Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.779636 4910 patch_prober.go:28] interesting pod/downloads-7954f5f757-4hf2m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.782748 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4hf2m" podUID="761b3955-fd92-419a-934c-31f294bbecde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.795986 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" event={"ID":"937d04f6-f3ce-47ef-9c90-bb5aae951969","Type":"ContainerStarted","Data":"4ed0dad2139a6e8549f661abae33a9424b7111e1d7c1e627912a5584a8d92464"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.806275 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-z8b2b" event={"ID":"85dbb42a-8a15-46fd-9cf1-5f48bebf8378","Type":"ContainerStarted","Data":"7b8269bf3211dac8e00877782abe7f794d45a2e83851305f61fecbf244217551"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.821481 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd" event={"ID":"b8eb6262-2c30-4192-8936-9463698c361e","Type":"ContainerStarted","Data":"09abc3163f99df76a941aceee19b57582150e61e9c6e5b9691dd16c61b4ff86a"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.844683 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-shg9w" event={"ID":"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e","Type":"ContainerStarted","Data":"4d060d92ece7a4407da367900c7481dd35de0eb68bffa209284d4a1309ed965b"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.851977 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: 
\"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:09 crc kubenswrapper[4910]: E1125 21:33:09.853646 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:10.35362218 +0000 UTC m=+145.816098502 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.866177 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" event={"ID":"bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b","Type":"ContainerStarted","Data":"f384fc743f8089a90f7bd420ac7d93e7dc29ee8a2677e32b8c896509aa507c33"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.873124 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-4856l" podStartSLOduration=124.873100132 podStartE2EDuration="2m4.873100132s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:09.871001857 +0000 UTC m=+145.333478179" watchObservedRunningTime="2025-11-25 21:33:09.873100132 +0000 UTC m=+145.335576454" Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.883046 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" event={"ID":"6a78730f-7e10-472d-b99f-30aaec803f7e","Type":"ContainerStarted","Data":"95dffeedd2fdf19147bd25acde5ecde63d6bc42039faa1560c12ea628f327b2f"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.883089 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" event={"ID":"6a78730f-7e10-472d-b99f-30aaec803f7e","Type":"ContainerStarted","Data":"f64bab812c77fefb6adc4332e7231c8da7023c3b50d1541fcc9763f18a1c83f8"} Nov 25 21:33:09 crc kubenswrapper[4910]: W1125 21:33:09.892311 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfba1f4e8_1272_428d_95eb_7e01208f7b97.slice/crio-f97125295f6807118c6ad9197dc2e7df3c5083f7eace9bb58a2ee4c691aab985 WatchSource:0}: Error finding container f97125295f6807118c6ad9197dc2e7df3c5083f7eace9bb58a2ee4c691aab985: Status 404 returned error can't find the container with id f97125295f6807118c6ad9197dc2e7df3c5083f7eace9bb58a2ee4c691aab985 Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.909770 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" event={"ID":"f63a8e93-5656-4edc-9ee5-24314ebf749a","Type":"ContainerStarted","Data":"3175deeb69168b355e99e1a1eb2b5077f65427ae1595a0fa5277d6f8c7056a8a"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.930728 4910 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62"] Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.936131 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" event={"ID":"1fcc231e-278e-466a-97b3-34dc4a705b35","Type":"ContainerStarted","Data":"c456b22b2640c551f8a158e24f9ae65848f5c632fc6d0df407dcdeacfec5e0ec"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.951552 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" event={"ID":"685f36d8-75f7-4b23-8eba-081657468d03","Type":"ContainerStarted","Data":"ebd43c009b5a113b6efc55fc414d8aead0d161c9895f941329402a0276e7b024"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.954180 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:09 crc kubenswrapper[4910]: E1125 21:33:09.955141 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:10.455126453 +0000 UTC m=+145.917602775 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.962918 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-4hf2m" podStartSLOduration=124.962889693 podStartE2EDuration="2m4.962889693s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:09.89735474 +0000 UTC m=+145.359831062" watchObservedRunningTime="2025-11-25 21:33:09.962889693 +0000 UTC m=+145.425366015" Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.968544 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-krfkq"] Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.976070 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" event={"ID":"1e473d16-b8d7-4f50-966f-8c3536051b54","Type":"ContainerStarted","Data":"dccfe039d851bec9f76148d1717540efc02c9afe45f26bc89cfb61765e1aa2da"} Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.993144 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-shg9w" podStartSLOduration=124.993097655 podStartE2EDuration="2m4.993097655s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:09.937159269 +0000 UTC m=+145.399635611" watchObservedRunningTime="2025-11-25 21:33:09.993097655 +0000 UTC m=+145.455573977" Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.995376 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-qmbhh"] Nov 25 21:33:09 crc kubenswrapper[4910]: I1125 21:33:09.996547 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-4fdmp" podStartSLOduration=124.996536131 podStartE2EDuration="2m4.996536131s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:09.9754349 +0000 UTC m=+145.437911222" watchObservedRunningTime="2025-11-25 21:33:09.996536131 +0000 UTC m=+145.459012453" Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.021634 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-6lgsx" event={"ID":"373d4823-deb1-4353-afb0-fcc3894ecd5a","Type":"ContainerStarted","Data":"ea74e9aff734f15542523206cec59ea3436966ac27f3458997598762d414f91d"} Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.021698 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-6lgsx" event={"ID":"373d4823-deb1-4353-afb0-fcc3894ecd5a","Type":"ContainerStarted","Data":"72a10c7f0430f5a00971dd59d321940cdd56915a79254732d4ad908aa831f24d"} Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.024308 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.024893 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" podStartSLOduration=125.024879986 podStartE2EDuration="2m5.024879986s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:10.024589347 +0000 UTC m=+145.487065669" watchObservedRunningTime="2025-11-25 21:33:10.024879986 +0000 UTC m=+145.487356308" Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.035774 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" event={"ID":"380d647c-be46-416a-9152-605a41509c0c","Type":"ContainerStarted","Data":"794be4f9c6e784146f89f2a6eb4aca1b84f2bf908efedf336acf31ac80714475"} Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.041630 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" event={"ID":"87561ea5-e7c7-4286-aa91-43c6478ff037","Type":"ContainerStarted","Data":"350ee50f1ff18d39563ed265588a7dfb72551a62bd0a27bfca7c6bada2fffd4a"} Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.051313 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" event={"ID":"c41d5586-c61c-42ec-a6c3-c22bf75f7f1e","Type":"ContainerStarted","Data":"e33282cb346ff5f8c28829a0dffa1af1b20a2ca334b0f907601ca62070517ea4"} Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.055162 4910 kubelet.go:2428] 
"SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-khq82"] Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.055911 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:10 crc kubenswrapper[4910]: E1125 21:33:10.057287 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:10.557226744 +0000 UTC m=+146.019703066 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.065260 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" event={"ID":"3cd48358-1086-47f5-aab1-3acea0c01379","Type":"ContainerStarted","Data":"ab6d42f7b9d3df1f86a6ce7af6c94be6fc7bd16d8f08051cba03ecad07006d3c"} Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.072082 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-5ncw9" podStartSLOduration=125.072065412 podStartE2EDuration="2m5.072065412s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:10.061656671 +0000 UTC m=+145.524132993" watchObservedRunningTime="2025-11-25 21:33:10.072065412 +0000 UTC m=+145.534541734" Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.080423 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" event={"ID":"53f9472e-3cd0-4f90-8320-691acacf6482","Type":"ContainerStarted","Data":"43d67d9dc1acf8df9ec3052594d78fe452cfcf4110270eabdf0353c64b2b61bd"} Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.096203 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" event={"ID":"2e21045d-f1d4-4e65-a7d5-5fd250dba564","Type":"ContainerStarted","Data":"23a139269536d7ee6409dd913673e6e5f2f27ceecb68d28962ccd04c6d4b7dd4"} Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.098840 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" event={"ID":"a90ac3d5-841b-49f1-a6f3-2647f598ab89","Type":"ContainerStarted","Data":"07192fbfaf14e3d5f8a04a5873756bc5f0b9b7e22337c45f3403bb05f279c07d"} Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.103427 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" event={"ID":"00aa9da1-ce63-4c3f-bbec-4f1c97d85838","Type":"ContainerStarted","Data":"e5ba753f7ca24de4ce1f7e357943ef7829423209ffcf8b2b9f574f5045fe6915"} Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.106620 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" event={"ID":"3e82986a-2957-4450-b122-a47b6d65fd63","Type":"ContainerStarted","Data":"cdeac325c78af82e127759ce1fd8fd82b773689aeffb0af05674b990a4d8e74a"} Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.106954 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.118612 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:33:10 crc kubenswrapper[4910]: W1125 21:33:10.118662 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07858e25_96fe_4c88_b094_de75bbe5066a.slice/crio-637aec5428c5872d8fa23171f002a8ad752b45b4a34556c402fa8d99e158d9ed WatchSource:0}: Error finding container 637aec5428c5872d8fa23171f002a8ad752b45b4a34556c402fa8d99e158d9ed: Status 404 returned error can't find the container with id 637aec5428c5872d8fa23171f002a8ad752b45b4a34556c402fa8d99e158d9ed Nov 25 21:33:10 crc kubenswrapper[4910]: W1125 21:33:10.120045 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd75b2d0_63c6_4106_83fd_f2c15129ce82.slice/crio-e94e18819453f42d241468a46606e4b1d44c5c0774e6ed4fdfc698ed438cb067 WatchSource:0}: Error finding container e94e18819453f42d241468a46606e4b1d44c5c0774e6ed4fdfc698ed438cb067: Status 404 returned error can't find the container with id e94e18819453f42d241468a46606e4b1d44c5c0774e6ed4fdfc698ed438cb067 Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.159580 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:10 crc kubenswrapper[4910]: E1125 21:33:10.163433 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:10.663403511 +0000 UTC m=+146.125879833 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.262317 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:10 crc kubenswrapper[4910]: E1125 21:33:10.262425 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:10.762405627 +0000 UTC m=+146.224881949 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.263121 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:10 crc kubenswrapper[4910]: E1125 21:33:10.263552 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:10.763535992 +0000 UTC m=+146.226012324 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.371264 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:10 crc kubenswrapper[4910]: E1125 21:33:10.371808 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:10.871785253 +0000 UTC m=+146.334261575 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.487404 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:10 crc kubenswrapper[4910]: E1125 21:33:10.487861 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:10.987844195 +0000 UTC m=+146.450320517 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.535466 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" podStartSLOduration=125.535447334 podStartE2EDuration="2m5.535447334s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:10.533761192 +0000 UTC m=+145.996237514" watchObservedRunningTime="2025-11-25 21:33:10.535447334 +0000 UTC m=+145.997923646" Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.588983 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:10 crc kubenswrapper[4910]: E1125 21:33:10.589565 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:11.089546464 +0000 UTC m=+146.552022786 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.621309 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" podStartSLOduration=125.621287454 podStartE2EDuration="2m5.621287454s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:10.57192773 +0000 UTC m=+146.034404082" watchObservedRunningTime="2025-11-25 21:33:10.621287454 +0000 UTC m=+146.083763776" Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.621419 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-6lgsx" podStartSLOduration=125.621413377 podStartE2EDuration="2m5.621413377s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:10.616974801 +0000 UTC m=+146.079451133" watchObservedRunningTime="2025-11-25 21:33:10.621413377 +0000 UTC m=+146.083889690" Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.693087 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:10 crc kubenswrapper[4910]: E1125 21:33:10.693971 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:11.193956136 +0000 UTC m=+146.656432458 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.801278 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:10 crc kubenswrapper[4910]: E1125 21:33:10.801666 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:11.30165155 +0000 UTC m=+146.764127872 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:10 crc kubenswrapper[4910]: I1125 21:33:10.903037 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:10 crc kubenswrapper[4910]: E1125 21:33:10.903929 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:11.403910257 +0000 UTC m=+146.866386579 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.004774 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:11 crc kubenswrapper[4910]: E1125 21:33:11.005270 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:11.505233764 +0000 UTC m=+146.967710086 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.026334 4910 patch_prober.go:28] interesting pod/console-operator-58897d9998-6lgsx container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.026406 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-6lgsx" podUID="373d4823-deb1-4353-afb0-fcc3894ecd5a" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.051680 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.051745 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.064014 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.104009 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" podStartSLOduration=126.103988882 podStartE2EDuration="2m6.103988882s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 
+0000 UTC" observedRunningTime="2025-11-25 21:33:10.657151421 +0000 UTC m=+146.119627753" watchObservedRunningTime="2025-11-25 21:33:11.103988882 +0000 UTC m=+146.566465204" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.106700 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:11 crc kubenswrapper[4910]: E1125 21:33:11.107149 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:11.607131809 +0000 UTC m=+147.069608131 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.178684 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" event={"ID":"87561ea5-e7c7-4286-aa91-43c6478ff037","Type":"ContainerStarted","Data":"02251d2e689c993c562eea7e552780919128285e1ff34740dca05ea741f49c0c"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.212699 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:11 crc kubenswrapper[4910]: E1125 21:33:11.214409 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:11.714385989 +0000 UTC m=+147.176862311 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.277633 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" event={"ID":"937d04f6-f3ce-47ef-9c90-bb5aae951969","Type":"ContainerStarted","Data":"b06ded2ac3d61ae055f2d6430318f36829480fce87efdd18f6e4b70dd7bffbc2"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.286483 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-w78cp" event={"ID":"769629cb-29e2-4d73-8628-c8ee04bd9040","Type":"ContainerStarted","Data":"7d009a92089376af8847952e7acbff26c3151b9bb45f65f6d7759b3b00e1e3ea"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.315036 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:11 crc kubenswrapper[4910]: E1125 21:33:11.315770 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:11.815756438 +0000 UTC m=+147.278232760 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.322108 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-hs677" event={"ID":"00aa9da1-ce63-4c3f-bbec-4f1c97d85838","Type":"ContainerStarted","Data":"c88326ad84504506223dacb575c24d3e86e880ff7e37c04eeda164569b333be9"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.358774 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-cfjc4" event={"ID":"06539de3-ec9c-42dd-b5cb-c23227463dba","Type":"ContainerStarted","Data":"f765ac914929560cc2b9778eff902f10e662e154e2dc2c3650d0d5def2903361"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.376858 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-z8b2b" event={"ID":"85dbb42a-8a15-46fd-9cf1-5f48bebf8378","Type":"ContainerStarted","Data":"8327f1ec8eaee7deec2384ba3974f6fc89800e8c2ab04fc67c5cd18b5f701ab8"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.416756 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-qptlh" podStartSLOduration=126.416725204 podStartE2EDuration="2m6.416725204s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.250076041 +0000 UTC m=+146.712552383" watchObservedRunningTime="2025-11-25 21:33:11.416725204 +0000 UTC m=+146.879201526" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.423734 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:11 crc kubenswrapper[4910]: E1125 21:33:11.425236 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:11.925217216 +0000 UTC m=+147.387693538 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.430855 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd" event={"ID":"b8eb6262-2c30-4192-8936-9463698c361e","Type":"ContainerStarted","Data":"3daa2877716c6ab282d0fccdb21c903d1b1c398e558a1aba8723d48717d04f8f"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.444436 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" event={"ID":"2e21045d-f1d4-4e65-a7d5-5fd250dba564","Type":"ContainerStarted","Data":"e90cc3c0dabb7dfb9c35928bec30909d8a4378f52c055fda66f369b20c60b6a6"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.455028 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-qmbhh" event={"ID":"089e4ab9-11b3-4b39-b74c-c61227722e66","Type":"ContainerStarted","Data":"c8d8c4686a6cccbb2f9c9303ebd01536761c31bbf95af6bd85c47a4ac95a3574"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.475766 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-cfjc4" podStartSLOduration=125.475748576 podStartE2EDuration="2m5.475748576s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.422882224 +0000 UTC m=+146.885358546" watchObservedRunningTime="2025-11-25 21:33:11.475748576 +0000 UTC m=+146.938224898" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.481790 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" event={"ID":"3eecf253-d2c2-46ae-97d9-317d07bd346b","Type":"ContainerStarted","Data":"ed70775f4e7bd0ea28839b6803454b753262d976f9c26b083b860d8f2e4999a8"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.490055 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-z8b2b" podStartSLOduration=7.484180896 podStartE2EDuration="7.484180896s" podCreationTimestamp="2025-11-25 21:33:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.470997919 +0000 UTC m=+146.933474241" watchObservedRunningTime="2025-11-25 21:33:11.484180896 +0000 UTC m=+146.946657218" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.503936 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" event={"ID":"07858e25-96fe-4c88-b094-de75bbe5066a","Type":"ContainerStarted","Data":"637aec5428c5872d8fa23171f002a8ad752b45b4a34556c402fa8d99e158d9ed"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.504293 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-6ncxd" 
podStartSLOduration=125.504275136 podStartE2EDuration="2m5.504275136s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.501756639 +0000 UTC m=+146.964232951" watchObservedRunningTime="2025-11-25 21:33:11.504275136 +0000 UTC m=+146.966751458" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.514665 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" event={"ID":"d5c80d9d-f8b4-4a0e-9787-818e7b029259","Type":"ContainerStarted","Data":"1d6382ca8dce7a13be2b6772b60045e558d00fe2a4c0a806b89c0fe64e0cd337"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.520988 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-mlrxt" event={"ID":"a90ac3d5-841b-49f1-a6f3-2647f598ab89","Type":"ContainerStarted","Data":"5525470ba2f1968e17ae41e7da32cbf0270496c5849dec0b410c0d05a75c4a8f"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.525259 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:11 crc kubenswrapper[4910]: E1125 21:33:11.526487 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:12.026473251 +0000 UTC m=+147.488949573 (durationBeforeRetry 500ms). 
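
The pod_startup_latency_tracker lines carry their own arithmetic: podStartSLOduration is watchObservedRunningTime minus podCreationTimestamp (here 21:33:11.416725204 - 21:31:05 = 126.416725204s, printed as "2m6.416725204s"). A small sketch reproducing that subtraction, assuming the timestamp layout shown in the log:

    // slo_duration.go: reproduces the arithmetic behind lines like
    // podStartSLOduration=126.416725204 podStartE2EDuration="2m6.416725204s".
    package main

    import (
        "fmt"
        "time"
    )

    // mustParse panics on a bad timestamp; fine for a throwaway check.
    func mustParse(layout, s string) time.Time {
        t, err := time.Parse(layout, s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        // Layout matching the "2025-11-25 21:31:05 +0000 UTC" form in the log.
        const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
        created := mustParse(layout, "2025-11-25 21:31:05 +0000 UTC")
        observed := mustParse(layout, "2025-11-25 21:33:11.416725204 +0000 UTC")
        d := observed.Sub(created)
        fmt.Println(d.Seconds()) // 126.416725204 -> podStartSLOduration
        fmt.Println(d)           // 2m6.416725204s -> podStartE2EDuration
    }
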
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.529202 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" event={"ID":"dd75b2d0-63c6-4106-83fd-f2c15129ce82","Type":"ContainerStarted","Data":"e94e18819453f42d241468a46606e4b1d44c5c0774e6ed4fdfc698ed438cb067"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.531819 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" event={"ID":"f1e48d81-784e-4803-8f4c-838f551cf7e0","Type":"ContainerStarted","Data":"529aab027d96a54b699bef48c319098f3c70404c4decfb2fc56de54d0cfa3594"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.546679 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-8pgqj" podStartSLOduration=125.546651704 podStartE2EDuration="2m5.546651704s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.54588076 +0000 UTC m=+147.008357082" watchObservedRunningTime="2025-11-25 21:33:11.546651704 +0000 UTC m=+147.009128026" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.558597 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" event={"ID":"f63a8e93-5656-4edc-9ee5-24314ebf749a","Type":"ContainerStarted","Data":"25d97fd1c4f116143560791ce01af7b2ee4823923288042156276eb3ca678b5c"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.559611 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.578054 4910 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-jqcq6 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.578120 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" podUID="f63a8e93-5656-4edc-9ee5-24314ebf749a" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.583021 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" event={"ID":"4cf4299b-4d9a-4b11-bf53-34bf106d39ef","Type":"ContainerStarted","Data":"428609421be42d038be037bb5f04e1164450594195218f3fb1d0c281b5b00705"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.583799 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.600759 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-sfx28" event={"ID":"ffe31541-2d82-48bf-b8ae-eb586019573e","Type":"ContainerStarted","Data":"f305c412944c52d37cb92eb3b87020305e63c50b3d8540333b13a58cae25b60c"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.602081 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" podStartSLOduration=125.602052374 podStartE2EDuration="2m5.602052374s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.59770681 +0000 UTC m=+147.060183132" watchObservedRunningTime="2025-11-25 21:33:11.602052374 +0000 UTC m=+147.064528696" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.625584 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" event={"ID":"bba27cfa-4cd1-44cc-ad32-d12e80f8ec3b","Type":"ContainerStarted","Data":"be07784238fbdc313b3243c499a395bf194f55323e4ed15cfcc017fbd35d0f59"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.626780 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:11 crc kubenswrapper[4910]: E1125 21:33:11.629450 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:12.129422979 +0000 UTC m=+147.591899301 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.648734 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-d89ck" podStartSLOduration=125.648714944 podStartE2EDuration="2m5.648714944s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.639888792 +0000 UTC m=+147.102365114" watchObservedRunningTime="2025-11-25 21:33:11.648714944 +0000 UTC m=+147.111191266" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.692129 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" event={"ID":"380d647c-be46-416a-9152-605a41509c0c","Type":"ContainerStarted","Data":"b033f81ced2d430dc2c9cd810e49b63a8f51025930a5b23839636b3a4ebe539f"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.735838 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" event={"ID":"fba1f4e8-1272-428d-95eb-7e01208f7b97","Type":"ContainerStarted","Data":"f97125295f6807118c6ad9197dc2e7df3c5083f7eace9bb58a2ee4c691aab985"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.743583 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:11 crc kubenswrapper[4910]: E1125 21:33:11.746111 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:12.24609392 +0000 UTC m=+147.708570242 (durationBeforeRetry 500ms). 
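
The nestedpendingoperations.go errors are the kubelet's retry gate: after each failure the operation key is locked until now + durationBeforeRetry (500ms here), and any reconciler pass before that deadline is rejected rather than retried. A sketch of that delay pattern using the apimachinery wait helpers; the kubelet's real implementation tracks a deadline per volume/pod key, and the doubling factor below is illustrative, not taken from this log:

    // retry_backoff.go: the shape of the retry gate seen in the
    // nestedpendingoperations.go lines above, sketched with wait.Backoff.
    package main

    import (
        "errors"
        "fmt"
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
        backoff := wait.Backoff{
            Duration: 500 * time.Millisecond, // matches "durationBeforeRetry 500ms"
            Factor:   2.0,                    // illustrative growth factor
            Steps:    5,
        }
        attempt := 0
        err := wait.ExponentialBackoff(backoff, func() (bool, error) {
            attempt++
            fmt.Printf("attempt %d: MountDevice still failing, retry later\n", attempt)
            return false, nil // false, nil = not done; wait out the delay and retry
        })
        if errors.Is(err, wait.ErrWaitTimeout) {
            fmt.Println("steps exhausted; the kubelet would keep re-queueing instead")
        }
    }
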
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.760267 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" event={"ID":"c41d5586-c61c-42ec-a6c3-c22bf75f7f1e","Type":"ContainerStarted","Data":"1074883747f06b91186249432b4dc693253bfd4cc12b5573ee3e95fba2d6f989"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.780060 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-sfx28" podStartSLOduration=6.780022217 podStartE2EDuration="6.780022217s" podCreationTimestamp="2025-11-25 21:33:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.694056214 +0000 UTC m=+147.156532556" watchObservedRunningTime="2025-11-25 21:33:11.780022217 +0000 UTC m=+147.242498539" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.791498 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" podStartSLOduration=126.7914654 podStartE2EDuration="2m6.7914654s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.771859015 +0000 UTC m=+147.234335337" watchObservedRunningTime="2025-11-25 21:33:11.7914654 +0000 UTC m=+147.253941722" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.818725 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg" event={"ID":"b6e50a82-11e5-4428-a1d7-f43cb9f1a2d8","Type":"ContainerStarted","Data":"627d8420752261bf5f362128609d53b2b143fde52028a85d0bfc753f911e488b"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.819233 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg" event={"ID":"b6e50a82-11e5-4428-a1d7-f43cb9f1a2d8","Type":"ContainerStarted","Data":"8acbd3d1e247d66cabafbea90b2e5006420fac95a581ea79b623ed3baa8f6316"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.837060 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-62t7b" podStartSLOduration=125.837038307 podStartE2EDuration="2m5.837038307s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.833395134 +0000 UTC m=+147.295871456" watchObservedRunningTime="2025-11-25 21:33:11.837038307 +0000 UTC m=+147.299514629" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.847438 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:11 crc kubenswrapper[4910]: E1125 21:33:11.848777 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:12.348759378 +0000 UTC m=+147.811235700 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.850588 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" event={"ID":"53f9472e-3cd0-4f90-8320-691acacf6482","Type":"ContainerStarted","Data":"9da1744252724923a67f762541b79d91de0a5ccaf725398ceae3f93cb90e9cc6"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.851210 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.854305 4910 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-sjcdg container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.19:8443/healthz\": dial tcp 10.217.0.19:8443: connect: connection refused" start-of-body= Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.854357 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" podUID="53f9472e-3cd0-4f90-8320-691acacf6482" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/healthz\": dial tcp 10.217.0.19:8443: connect: connection refused" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.865010 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" event={"ID":"ae41dbb9-e2c8-4fae-b739-534ec0e520d5","Type":"ContainerStarted","Data":"6c7b37e725a541002048d96cab3ade8f4966f8f523d6bbd642cbc362e33591d7"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.866382 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.880989 4910 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-6nn9f container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:5443/healthz\": dial tcp 10.217.0.41:5443: connect: connection refused" start-of-body= Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.881044 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" podUID="ae41dbb9-e2c8-4fae-b739-534ec0e520d5" containerName="packageserver" probeResult="failure" output="Get 
\"https://10.217.0.41:5443/healthz\": dial tcp 10.217.0.41:5443: connect: connection refused" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.899673 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k7kph" event={"ID":"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d","Type":"ContainerStarted","Data":"4b3a65e8ea35a8ebc8b754ee6c44e0b73d5f1ac6b520c82ebc7215aef62d5966"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.899966 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn" podStartSLOduration=126.899939768 podStartE2EDuration="2m6.899939768s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.881802338 +0000 UTC m=+147.344278660" watchObservedRunningTime="2025-11-25 21:33:11.899939768 +0000 UTC m=+147.362416090" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.926201 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" event={"ID":"fb8c2b0c-00aa-406d-abb6-e989dbe3abea","Type":"ContainerStarted","Data":"e67b97308ff8630ac0d7ef4093f371460010b8a363522540a066ed5c43e93beb"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.926938 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.935160 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-47x5d" event={"ID":"685f36d8-75f7-4b23-8eba-081657468d03","Type":"ContainerStarted","Data":"718578a36ea17e53f2b8f908cb2622810a3532528aaba6f43b6c02a7f85eee76"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.936691 4910 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-78rzv container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/healthz\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.936738 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" podUID="fb8c2b0c-00aa-406d-abb6-e989dbe3abea" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.25:8080/healthz\": dial tcp 10.217.0.25:8080: connect: connection refused" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.949828 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:11 crc kubenswrapper[4910]: E1125 21:33:11.953653 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:12.453637065 +0000 UTC m=+147.916113387 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.958442 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-7bhcw" podStartSLOduration=125.958410772 podStartE2EDuration="2m5.958410772s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:11.957791253 +0000 UTC m=+147.420267575" watchObservedRunningTime="2025-11-25 21:33:11.958410772 +0000 UTC m=+147.420887084" Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.982314 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-shg9w" event={"ID":"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e","Type":"ContainerStarted","Data":"39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e"} Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.983001 4910 patch_prober.go:28] interesting pod/downloads-7954f5f757-4hf2m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Nov 25 21:33:11 crc kubenswrapper[4910]: I1125 21:33:11.984480 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4hf2m" podUID="761b3955-fd92-419a-934c-31f294bbecde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.013610 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-6lgsx" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.042554 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-tfpf5" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.052263 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.054827 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:12.554783296 +0000 UTC m=+148.017259618 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.160490 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.161017 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:12.661000654 +0000 UTC m=+148.123476976 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.186451 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.186939 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" podStartSLOduration=126.186921074 podStartE2EDuration="2m6.186921074s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:12.186228113 +0000 UTC m=+147.648704455" watchObservedRunningTime="2025-11-25 21:33:12.186921074 +0000 UTC m=+147.649397396" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.187148 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" podStartSLOduration=126.187143221 podStartE2EDuration="2m6.187143221s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:12.087130484 +0000 UTC m=+147.549606806" watchObservedRunningTime="2025-11-25 21:33:12.187143221 +0000 UTC m=+147.649619543" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.209563 4910 patch_prober.go:28] interesting pod/router-default-5444994796-cfjc4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 21:33:12 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld Nov 25 21:33:12 crc kubenswrapper[4910]: 
[+]process-running ok Nov 25 21:33:12 crc kubenswrapper[4910]: healthz check failed Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.209652 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-cfjc4" podUID="06539de3-ec9c-42dd-b5cb-c23227463dba" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.261889 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.262025 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:12.762006642 +0000 UTC m=+148.224482964 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.262103 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.262445 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:12.762437255 +0000 UTC m=+148.224913577 (durationBeforeRetry 500ms). 
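
The router's startup-probe output uses the aggregated healthz format: one "[+]name ok" or "[-]name failed: reason withheld" line per sub-check, a trailing "healthz check failed", and HTTP 500 when any check fails, which is the "statuscode: 500" the prober reports. A generic sketch of a handler producing that format; the check names come from the log, and this is not the openshift-router's actual code:

    // healthz_format.go: a generic handler emitting the aggregated healthz
    // format seen in the router probe output above.
    package main

    import (
        "fmt"
        "log"
        "net/http"
    )

    type check struct {
        name string
        fn   func() error
    }

    func healthz(checks []check) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            failed := false
            body := ""
            for _, c := range checks {
                if err := c.fn(); err != nil {
                    failed = true
                    body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
                } else {
                    body += fmt.Sprintf("[+]%s ok\n", c.name)
                }
            }
            if failed {
                body += "healthz check failed\n"
                w.WriteHeader(http.StatusInternalServerError) // probe sees statuscode: 500
            }
            fmt.Fprint(w, body)
        }
    }

    func main() {
        http.HandleFunc("/healthz", healthz([]check{
            {"backend-http", func() error { return fmt.Errorf("not synced") }},
            {"has-synced", func() error { return fmt.Errorf("not synced") }},
            {"process-running", func() error { return nil }},
        }))
        log.Fatal(http.ListenAndServe(":8080", nil))
    }
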
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.325966 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" podStartSLOduration=126.325947225 podStartE2EDuration="2m6.325947225s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:12.224587677 +0000 UTC m=+147.687063999" watchObservedRunningTime="2025-11-25 21:33:12.325947225 +0000 UTC m=+147.788423547" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.367121 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.367458 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:12.867444556 +0000 UTC m=+148.329920878 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.468330 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.468597 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:12.968586558 +0000 UTC m=+148.431062880 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.469650 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-k7kph" podStartSLOduration=127.46963472 podStartE2EDuration="2m7.46963472s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:12.468393062 +0000 UTC m=+147.930869384" watchObservedRunningTime="2025-11-25 21:33:12.46963472 +0000 UTC m=+147.932111042" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.470475 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" podStartSLOduration=126.470469186 podStartE2EDuration="2m6.470469186s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:12.325758059 +0000 UTC m=+147.788234381" watchObservedRunningTime="2025-11-25 21:33:12.470469186 +0000 UTC m=+147.932945508" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.569184 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.569634 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.069620856 +0000 UTC m=+148.532097178 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.670598 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.671064 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.171049156 +0000 UTC m=+148.633525478 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.772218 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.772386 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.272358763 +0000 UTC m=+148.734835085 (durationBeforeRetry 500ms). 
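
The UniqueName repeated throughout, kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-..., is the CSI plugin prefix plus driver name, a '^' separator, and the volume handle; the part before '^' is exactly the driver name the failed lookup complains about. Splitting it, assuming that convention as it appears in these entries:

    // uniquename.go: splits the CSI UniqueName shown in these entries into the
    // driver name the kubelet looks up and the volume handle passed to CSI calls.
    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        u := "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8"
        name := strings.TrimPrefix(u, "kubernetes.io/csi/")
        driver, handle, ok := strings.Cut(name, "^")
        if !ok {
            panic("not a CSI unique volume name")
        }
        fmt.Println("driver:", driver) // kubevirt.io.hostpath-provisioner
        fmt.Println("handle:", handle) // pvc-657094db-...
    }
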
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.772637 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.772986 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.272969702 +0000 UTC m=+148.735446024 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.874528 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.874735 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.374701622 +0000 UTC m=+148.837177944 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.875520 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.875882 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.375872978 +0000 UTC m=+148.838349300 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.976681 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.976881 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.476840724 +0000 UTC m=+148.939317046 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.977094 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:12 crc kubenswrapper[4910]: E1125 21:33:12.977447 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.477435363 +0000 UTC m=+148.939911675 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.988720 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-krfkq" event={"ID":"dd75b2d0-63c6-4106-83fd-f2c15129ce82","Type":"ContainerStarted","Data":"73c3dac550aa6fc135c0b9e27c786e8931465e45f5cef2c20844cee961476602"} Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.990645 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" event={"ID":"937d04f6-f3ce-47ef-9c90-bb5aae951969","Type":"ContainerStarted","Data":"58658253de4f728cb68306b254aaae1ae93a01b73db5bf8c15d518d96f5e67c7"} Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.992436 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-qmbhh" event={"ID":"089e4ab9-11b3-4b39-b74c-c61227722e66","Type":"ContainerStarted","Data":"034e88011d6e184158c041f017f96ab8d2f3e4c8a67a53f65333fdf86dc3c2ec"} Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.992500 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-qmbhh" event={"ID":"089e4ab9-11b3-4b39-b74c-c61227722e66","Type":"ContainerStarted","Data":"0484e25cef97eb9cc51cf93bb5d1fe0529cf9f113fc0dd82aeeeb96670cc52bd"} Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.992550 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-qmbhh" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.995131 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" event={"ID":"07858e25-96fe-4c88-b094-de75bbe5066a","Type":"ContainerStarted","Data":"23721949e5b35a89c01fef71109b55255e3776938d307eb7315f40ea9bc86aeb"} Nov 25 21:33:12 crc 
kubenswrapper[4910]: I1125 21:33:12.995288 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:12 crc kubenswrapper[4910]: I1125 21:33:12.998477 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-k7kph" event={"ID":"df48041a-e8d7-49d2-a8a8-159d5ed5ec6d","Type":"ContainerStarted","Data":"c59c8719f9000380c40e748e64a77bda8b60f3c20d9cd0227aaec4abc6cfde42"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.000211 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-w78cp" event={"ID":"769629cb-29e2-4d73-8628-c8ee04bd9040","Type":"ContainerStarted","Data":"d7107b372e33bd1772a913883249371e7237760a33e647bcd1013b56212072c6"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.001858 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" event={"ID":"2e21045d-f1d4-4e65-a7d5-5fd250dba564","Type":"ContainerStarted","Data":"37f9d7cd531ce5b75149436e67b53ccd361170117bd00735388e834f085348db"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.002962 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-sfx28" event={"ID":"ffe31541-2d82-48bf-b8ae-eb586019573e","Type":"ContainerStarted","Data":"9f6d613312fa31f7d81668cb7c92f891a1652672bbcda16182e807d212a0435f"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.005099 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" event={"ID":"fb8c2b0c-00aa-406d-abb6-e989dbe3abea","Type":"ContainerStarted","Data":"e2930ae664a56c68a5e0c5e1c5e0fd1ff5d53e85aece04c1d436680b0a3fe978"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.005625 4910 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-78rzv container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/healthz\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body= Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.005684 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" podUID="fb8c2b0c-00aa-406d-abb6-e989dbe3abea" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.25:8080/healthz\": dial tcp 10.217.0.25:8080: connect: connection refused" Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.007388 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" event={"ID":"c41d5586-c61c-42ec-a6c3-c22bf75f7f1e","Type":"ContainerStarted","Data":"ae3157e762306c591619aa70fc24bd256bee73d3e926e0b7fd2538ce44f1ea14"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.009646 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg" event={"ID":"b6e50a82-11e5-4428-a1d7-f43cb9f1a2d8","Type":"ContainerStarted","Data":"98dd80601513709192cb240138517231137a7ccb1882441e48c6e5ad5606f024"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.011232 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" 
event={"ID":"1e473d16-b8d7-4f50-966f-8c3536051b54","Type":"ContainerStarted","Data":"8eeff34bbb7642886add2996cf5b34924bc0b9315f65e4654e8635a86d75ec42"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.011285 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" event={"ID":"1e473d16-b8d7-4f50-966f-8c3536051b54","Type":"ContainerStarted","Data":"72b0405566e9f50c77a3127f9884ae438c20323bddfffafbb3eec80de826bae5"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.011380 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.014641 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" event={"ID":"3eecf253-d2c2-46ae-97d9-317d07bd346b","Type":"ContainerStarted","Data":"603fd437339bc09b3a487f161483ebff209bd8738ac892a4b3462cb790edab00"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.014687 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" event={"ID":"3eecf253-d2c2-46ae-97d9-317d07bd346b","Type":"ContainerStarted","Data":"c9c7501d14c20fc3f73f7f2edac50da7c29c08d07a414a8128077b4762a9ae31"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.017091 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" event={"ID":"ae41dbb9-e2c8-4fae-b739-534ec0e520d5","Type":"ContainerStarted","Data":"1725d4d442ee965f26f2bc23dc9e25ee25f7bf38bf3f6caefcde5e8a9789c702"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.017974 4910 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-6nn9f container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.41:5443/healthz\": dial tcp 10.217.0.41:5443: connect: connection refused" start-of-body= Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.018028 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f" podUID="ae41dbb9-e2c8-4fae-b739-534ec0e520d5" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.41:5443/healthz\": dial tcp 10.217.0.41:5443: connect: connection refused" Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.018946 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" podStartSLOduration=127.018929803 podStartE2EDuration="2m7.018929803s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:12.619911698 +0000 UTC m=+148.082388030" watchObservedRunningTime="2025-11-25 21:33:13.018929803 +0000 UTC m=+148.481406125" Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.019665 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-t2d5g" podStartSLOduration=127.019659096 podStartE2EDuration="2m7.019659096s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-25 21:33:13.016579901 +0000 UTC m=+148.479056223" watchObservedRunningTime="2025-11-25 21:33:13.019659096 +0000 UTC m=+148.482135418" Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.022567 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" event={"ID":"fba1f4e8-1272-428d-95eb-7e01208f7b97","Type":"ContainerStarted","Data":"326bc5580b1be5088033ece2c528c2ec47df33c236b165c974770c30749b051c"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.024693 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-jwwk4" event={"ID":"3cd48358-1086-47f5-aab1-3acea0c01379","Type":"ContainerStarted","Data":"79a291c311d3f67d43aebc3163ade0c0d37518398905bb0c4811290a29d979a5"} Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.031710 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.041546 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.065672 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" podStartSLOduration=127.065650705 podStartE2EDuration="2m7.065650705s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:13.05771251 +0000 UTC m=+148.520188832" watchObservedRunningTime="2025-11-25 21:33:13.065650705 +0000 UTC m=+148.528127027" Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.078360 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-sjcdg" Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.079374 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:13 crc kubenswrapper[4910]: E1125 21:33:13.082005 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.581983369 +0000 UTC m=+149.044459691 (durationBeforeRetry 500ms). 
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.089312 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-tdzzn"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.185692 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-wktng" podStartSLOduration=127.185651689 podStartE2EDuration="2m7.185651689s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:13.151367741 +0000 UTC m=+148.613844063" watchObservedRunningTime="2025-11-25 21:33:13.185651689 +0000 UTC m=+148.648128011"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.211493 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:13 crc kubenswrapper[4910]: E1125 21:33:13.211937 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.71192328 +0000 UTC m=+149.174399602 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.214859 4910 patch_prober.go:28] interesting pod/router-default-5444994796-cfjc4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 21:33:13 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Nov 25 21:33:13 crc kubenswrapper[4910]: [+]process-running ok
Nov 25 21:33:13 crc kubenswrapper[4910]: healthz check failed
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.214964 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-cfjc4" podUID="06539de3-ec9c-42dd-b5cb-c23227463dba" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.230169 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-khq82" podStartSLOduration=127.230152513 podStartE2EDuration="2m7.230152513s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:13.228743349 +0000 UTC m=+148.691219671" watchObservedRunningTime="2025-11-25 21:33:13.230152513 +0000 UTC m=+148.692628835"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.305011 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-qmbhh" podStartSLOduration=8.304992382 podStartE2EDuration="8.304992382s" podCreationTimestamp="2025-11-25 21:33:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:13.302896158 +0000 UTC m=+148.765372480" watchObservedRunningTime="2025-11-25 21:33:13.304992382 +0000 UTC m=+148.767468704"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.313797 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:13 crc kubenswrapper[4910]: E1125 21:33:13.314205 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.814190846 +0000 UTC m=+149.276667158 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
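The router startup-probe failures above show the aggregated healthz format: one [+]/[-] line per named check (the reason is withheld unless a verbose endpoint is used) and a trailing "healthz check failed", served with HTTP 500 so the kubelet records the probe as failed. A self-contained sketch of a handler producing that shape (check names mirror the log; the implementation is illustrative, not the router's actual code):

package main

import (
	"fmt"
	"net/http"
)

// check is one named healthz condition; the router's real checks
// (backend-http, has-synced, process-running) are stand-ins here.
type check struct {
	name string
	ok   func() bool
}

func healthz(checks []check) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		failed := false
		body := ""
		for _, c := range checks {
			if c.ok() {
				body += fmt.Sprintf("[+]%s ok\n", c.name)
			} else {
				// Aggregators typically withhold details on the terse endpoint.
				body += fmt.Sprintf("[-]%s failed: reason withheld\n", c.name)
				failed = true
			}
		}
		if failed {
			body += "healthz check failed\n"
			w.WriteHeader(http.StatusInternalServerError) // probe sees statuscode: 500
		}
		fmt.Fprint(w, body)
	}
}

func main() {
	checks := []check{
		{"backend-http", func() bool { return false }},
		{"has-synced", func() bool { return false }},
		{"process-running", func() bool { return true }},
	}
	http.Handle("/healthz", healthz(checks))
	http.ListenAndServe(":8080", nil)
}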
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.415673 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:13 crc kubenswrapper[4910]: E1125 21:33:13.416089 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:13.916072551 +0000 UTC m=+149.378548873 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.455487 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-tbvtg" podStartSLOduration=127.455468357 podStartE2EDuration="2m7.455468357s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:13.37034835 +0000 UTC m=+148.832824672" watchObservedRunningTime="2025-11-25 21:33:13.455468357 +0000 UTC m=+148.917944679"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.517032 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:13 crc kubenswrapper[4910]: E1125 21:33:13.517255 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.017194532 +0000 UTC m=+149.479670854 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.517306 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.517360 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.517399 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.517438 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.517470 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 21:33:13 crc kubenswrapper[4910]: E1125 21:33:13.517767 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.017757209 +0000 UTC m=+149.480233581 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.518505 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.528217 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.528216 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.534981 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.572167 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-ldfjq" podStartSLOduration=127.572136418 podStartE2EDuration="2m7.572136418s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:13.472607816 +0000 UTC m=+148.935084138" watchObservedRunningTime="2025-11-25 21:33:13.572136418 +0000 UTC m=+149.034612740"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.580359 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-7lr62" podStartSLOduration=127.58032997 podStartE2EDuration="2m7.58032997s" podCreationTimestamp="2025-11-25 21:31:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:13.567905377 +0000 UTC m=+149.030381699" watchObservedRunningTime="2025-11-25 21:33:13.58032997 +0000 UTC m=+149.042806292"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.616493 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
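Every pvc-657094db mount and unmount attempt in this stretch fails the same way: the volume names the CSI driver kubevirt.io.hostpath-provisioner, but that driver has not yet registered a socket with the kubelet (its plugin pod, csi-hostpathplugin-w78cp, only starts further down), so building a CSI client fails before any RPC is made. The lookup behind "driver name ... not found in the list of registered CSI drivers" is essentially a guarded map access; a toy sketch, with names and types that are illustrative rather than kubelet's internals:

package main

import (
	"fmt"
	"sync"
)

// driverRegistry stands in for the kubelet's in-memory record of
// registered CSI plugins; a driver appears only after its registration
// socket has been processed.
type driverRegistry struct {
	mu      sync.RWMutex
	drivers map[string]string // driver name -> endpoint
}

func (r *driverRegistry) register(name, endpoint string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.drivers[name] = endpoint
}

func (r *driverRegistry) endpointFor(name string) (string, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	ep, ok := r.drivers[name]
	if !ok {
		// Same failure mode as the log: the volume references a driver
		// that has not registered yet, so the operation must be retried.
		return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
	}
	return ep, nil
}

func main() {
	reg := &driverRegistry{drivers: map[string]string{}}

	// Before registration: every mount/unmount attempt fails and is retried.
	if _, err := reg.endpointFor("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("retryable:", err)
	}

	// After the plugin socket is picked up (see the plugin_watcher line
	// further down), the same lookup succeeds and pending operations drain.
	reg.register("kubevirt.io.hostpath-provisioner", "/var/lib/kubelet/plugins/csi-hostpath/csi.sock")
	fmt.Println(reg.endpointFor("kubevirt.io.hostpath-provisioner"))
}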
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.620914 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:13 crc kubenswrapper[4910]: E1125 21:33:13.621486 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.12146439 +0000 UTC m=+149.583940702 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.636569 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.637033 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.724939 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:13 crc kubenswrapper[4910]: E1125 21:33:13.725286 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.225273984 +0000 UTC m=+149.687750306 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.825962 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:13 crc kubenswrapper[4910]: E1125 21:33:13.826761 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.326742016 +0000 UTC m=+149.789218338 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:13 crc kubenswrapper[4910]: I1125 21:33:13.928391 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:13 crc kubenswrapper[4910]: E1125 21:33:13.928917 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.428905259 +0000 UTC m=+149.891381581 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.033184 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.033359 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.533333322 +0000 UTC m=+149.995809644 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.033454 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.033756 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.533743995 +0000 UTC m=+149.996220317 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.068431 4910 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-78rzv container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.25:8080/healthz\": dial tcp 10.217.0.25:8080: connect: connection refused" start-of-body=
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.068489 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" podUID="fb8c2b0c-00aa-406d-abb6-e989dbe3abea" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.25:8080/healthz\": dial tcp 10.217.0.25:8080: connect: connection refused"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.137562 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.140304 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.640234181 +0000 UTC m=+150.102710503 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
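Each failure above is followed by "No retries permitted until <t> (durationBeforeRetry 500ms)": the operation executor records the failure and refuses to start the same volume operation again until a backoff window has elapsed, which is why the attempts recur at roughly half-second intervals. A toy version of such a gate, assuming a fixed 500ms delay (kubelet's nestedpendingoperations tracks backoff per operation and can grow it exponentially; this sketch is illustrative):

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryGate refuses to re-run a failed operation until
// durationBeforeRetry has elapsed since the last failure.
type retryGate struct {
	durationBeforeRetry time.Duration
	lastFailure         time.Time
}

func (g *retryGate) run(op func() error) error {
	if until := g.lastFailure.Add(g.durationBeforeRetry); time.Now().Before(until) {
		return fmt.Errorf("no retries permitted until %s (durationBeforeRetry %s)",
			until.Format(time.RFC3339Nano), g.durationBeforeRetry)
	}
	if err := op(); err != nil {
		g.lastFailure = time.Now() // arm the gate
		return err
	}
	return nil
}

func main() {
	g := &retryGate{durationBeforeRetry: 500 * time.Millisecond}
	mount := func() error { return errors.New("driver not registered yet") }

	fmt.Println(g.run(mount)) // fails, arms the gate
	fmt.Println(g.run(mount)) // rejected: still inside the 500ms window
	time.Sleep(600 * time.Millisecond)
	fmt.Println(g.run(mount)) // window elapsed, the operation is attempted again
}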
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.210633 4910 patch_prober.go:28] interesting pod/router-default-5444994796-cfjc4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 21:33:14 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Nov 25 21:33:14 crc kubenswrapper[4910]: [+]process-running ok
Nov 25 21:33:14 crc kubenswrapper[4910]: healthz check failed
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.211055 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-cfjc4" podUID="06539de3-ec9c-42dd-b5cb-c23227463dba" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.240184 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.240547 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.740532787 +0000 UTC m=+150.203009109 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.342677 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.343061 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.843046591 +0000 UTC m=+150.305522913 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.443829 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.444114 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:14.94410226 +0000 UTC m=+150.406578582 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.545747 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.546205 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:15.046190041 +0000 UTC m=+150.508666363 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.647667 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.648065 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:15.148052294 +0000 UTC m=+150.610528616 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.706594 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-h29m6"]
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.708457 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.710791 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.736122 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h29m6"]
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.752173 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.752465 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:15.252439106 +0000 UTC m=+150.714915428 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.753031 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.753571 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:15.253558641 +0000 UTC m=+150.716034963 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.854629 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.855345 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cscks\" (UniqueName: \"kubernetes.io/projected/0b5ec6d7-57b0-4082-9310-a18457ea9c36-kube-api-access-cscks\") pod \"certified-operators-h29m6\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") " pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.855458 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-utilities\") pod \"certified-operators-h29m6\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") " pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.855553 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-catalog-content\") pod \"certified-operators-h29m6\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") " pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.855703 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:15.355687663 +0000 UTC m=+150.818163985 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.910536 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5ffq9"]
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.911802 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.915207 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.921208 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5ffq9"]
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.977027 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cscks\" (UniqueName: \"kubernetes.io/projected/0b5ec6d7-57b0-4082-9310-a18457ea9c36-kube-api-access-cscks\") pod \"certified-operators-h29m6\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") " pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.977072 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-utilities\") pod \"certified-operators-h29m6\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") " pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.977099 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.977153 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-catalog-content\") pod \"certified-operators-h29m6\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") " pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.979848 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-catalog-content\") pod \"certified-operators-h29m6\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") " pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.979913 4910 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 25 21:33:14 crc kubenswrapper[4910]: I1125 21:33:14.980135 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-utilities\") pod \"certified-operators-h29m6\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") " pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:33:14 crc kubenswrapper[4910]: E1125 21:33:14.980435 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:15.480413992 +0000 UTC m=+150.942890314 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.001827 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cscks\" (UniqueName: \"kubernetes.io/projected/0b5ec6d7-57b0-4082-9310-a18457ea9c36-kube-api-access-cscks\") pod \"certified-operators-h29m6\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") " pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.039499 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-6nn9f"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.076421 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.078732 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:15 crc kubenswrapper[4910]: E1125 21:33:15.078869 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:15.57884783 +0000 UTC m=+151.041324152 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
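The plugin_watcher.go entry above is the turning point: the hostpath provisioner has finally dropped its registration socket into /var/lib/kubelet/plugins_registry, and the kubelet's plugin watcher queues it for registration (completed at 21:33:15.441752 below, after which the pending volume operations can succeed). The watcher pattern is inotify-based; a minimal sketch of the same idea using the fsnotify library (chosen here for illustration; this is not kubelet's actual pluginmanager code):

package main

import (
	"log"
	"strings"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Directory where CSI drivers drop their registration sockets.
	if err := w.Add("/var/lib/kubelet/plugins_registry"); err != nil {
		log.Fatal(err)
	}

	for {
		select {
		case ev := <-w.Events:
			// A newly created *.sock file is a plugin asking to register,
			// e.g. kubevirt.io.hostpath-provisioner-reg.sock in the log.
			if ev.Op&fsnotify.Create != 0 && strings.HasSuffix(ev.Name, ".sock") {
				log.Printf("adding socket path to desired state cache: %s", ev.Name)
			}
		case err := <-w.Errors:
			log.Println("watch error:", err)
		}
	}
}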
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.079013 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.079070 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bsfcr\" (UniqueName: \"kubernetes.io/projected/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-kube-api-access-bsfcr\") pod \"community-operators-5ffq9\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") " pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.079126 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-catalog-content\") pod \"community-operators-5ffq9\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") " pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.079157 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-utilities\") pod \"community-operators-5ffq9\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") " pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:33:15 crc kubenswrapper[4910]: E1125 21:33:15.079338 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:15.579328455 +0000 UTC m=+151.041804777 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.094789 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rrwnh"]
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.096135 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rrwnh"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.110837 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"5d309298e7f1b87314a52acc72e9359b3560730ea52f142e68b23909b90e6028"}
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.110897 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"42709a7cc7ab8ba48c011bba6909b01738286ae6084d77cf09f4ca72997aab19"}
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.114885 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"c873f930c77266f208d1dc27e2b37bf7900a05b982414c8db1c5b494451e943b"}
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.114929 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"18a4deac10b8ea38e7a577289fd7b4e6d787c24bdfe747088fa5861dfd86027b"}
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.115389 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.118618 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rrwnh"]
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.128546 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"3c935f9e40865b7c2d99b0278a5bd4ee7f5003005c4c58ff33f15349e131c87a"}
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.128603 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"5208801fb0fff6f6d1bafec1621535e5b2ed39a0b39ac1b958f6d302b3d2e795"}
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.150968 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-w78cp" event={"ID":"769629cb-29e2-4d73-8628-c8ee04bd9040","Type":"ContainerStarted","Data":"d5d8a907f3f44d7a0e530fe8ae900cbbab3067034cca813e9bfa2dd954f2573a"}
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.151019 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-w78cp" event={"ID":"769629cb-29e2-4d73-8628-c8ee04bd9040","Type":"ContainerStarted","Data":"f157a103012e9f966a0432e08f845ba01ca477b2ac19f17d6a632dd3d4423277"}
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.182143 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.182398 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2fpv\" (UniqueName: \"kubernetes.io/projected/e450be6d-4615-4cdf-8c24-edc01fa86412-kube-api-access-n2fpv\") pod \"certified-operators-rrwnh\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " pod="openshift-marketplace/certified-operators-rrwnh"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.182441 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bsfcr\" (UniqueName: \"kubernetes.io/projected/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-kube-api-access-bsfcr\") pod \"community-operators-5ffq9\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") " pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.182479 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-utilities\") pod \"certified-operators-rrwnh\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " pod="openshift-marketplace/certified-operators-rrwnh"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.182507 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-catalog-content\") pod \"community-operators-5ffq9\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") " pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.182527 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-catalog-content\") pod \"certified-operators-rrwnh\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " pod="openshift-marketplace/certified-operators-rrwnh"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.182554 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-utilities\") pod \"community-operators-5ffq9\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") " pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:33:15 crc kubenswrapper[4910]: E1125 21:33:15.183054 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:15.683035066 +0000 UTC m=+151.145511388 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.183338 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-utilities\") pod \"community-operators-5ffq9\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") " pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.183809 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-catalog-content\") pod \"community-operators-5ffq9\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") " pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.201470 4910 patch_prober.go:28] interesting pod/router-default-5444994796-cfjc4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 25 21:33:15 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld
Nov 25 21:33:15 crc kubenswrapper[4910]: [+]process-running ok
Nov 25 21:33:15 crc kubenswrapper[4910]: healthz check failed
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.201537 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-cfjc4" podUID="06539de3-ec9c-42dd-b5cb-c23227463dba" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.219470 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bsfcr\" (UniqueName: \"kubernetes.io/projected/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-kube-api-access-bsfcr\") pod \"community-operators-5ffq9\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") " pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.283495 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-catalog-content\") pod \"certified-operators-rrwnh\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " pod="openshift-marketplace/certified-operators-rrwnh"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.284054 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.284093 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2fpv\" (UniqueName: \"kubernetes.io/projected/e450be6d-4615-4cdf-8c24-edc01fa86412-kube-api-access-n2fpv\") pod \"certified-operators-rrwnh\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " pod="openshift-marketplace/certified-operators-rrwnh"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.284148 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-utilities\") pod \"certified-operators-rrwnh\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " pod="openshift-marketplace/certified-operators-rrwnh"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.285412 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-catalog-content\") pod \"certified-operators-rrwnh\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " pod="openshift-marketplace/certified-operators-rrwnh"
Nov 25 21:33:15 crc kubenswrapper[4910]: E1125 21:33:15.286102 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-25 21:33:15.786088147 +0000 UTC m=+151.248564469 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-kdgbm" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.286611 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-utilities\") pod \"certified-operators-rrwnh\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " pod="openshift-marketplace/certified-operators-rrwnh"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.311975 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tfpcn"]
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.315197 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.324441 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tfpcn"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.324649 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2fpv\" (UniqueName: \"kubernetes.io/projected/e450be6d-4615-4cdf-8c24-edc01fa86412-kube-api-access-n2fpv\") pod \"certified-operators-rrwnh\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " pod="openshift-marketplace/certified-operators-rrwnh"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.362893 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tfpcn"]
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.385228 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 25 21:33:15 crc kubenswrapper[4910]: E1125 21:33:15.386024 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-25 21:33:15.88598328 +0000 UTC m=+151.348459602 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.437592 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rrwnh"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.441752 4910 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-25T21:33:14.979943528Z","Handler":null,"Name":""}
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.448553 4910 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.448593 4910 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.486777 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-utilities\") pod \"community-operators-tfpcn\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") " pod="openshift-marketplace/community-operators-tfpcn"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.487538 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-catalog-content\") pod \"community-operators-tfpcn\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") " pod="openshift-marketplace/community-operators-tfpcn"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.487683 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zvlt\" (UniqueName: \"kubernetes.io/projected/33a592b6-e1fb-412c-8768-b83a79e08f61-kube-api-access-4zvlt\") pod \"community-operators-tfpcn\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") " pod="openshift-marketplace/community-operators-tfpcn"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.487795 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm"
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.491801 4910 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice...
Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.491867 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.573467 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-kdgbm\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.589676 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.589880 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-utilities\") pod \"community-operators-tfpcn\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") " pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.590002 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-catalog-content\") pod \"community-operators-tfpcn\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") " pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.590054 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zvlt\" (UniqueName: \"kubernetes.io/projected/33a592b6-e1fb-412c-8768-b83a79e08f61-kube-api-access-4zvlt\") pod \"community-operators-tfpcn\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") " pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.590726 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-utilities\") pod \"community-operators-tfpcn\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") " pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.591104 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-catalog-content\") pod \"community-operators-tfpcn\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") " pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.593580 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.637222 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zvlt\" (UniqueName: \"kubernetes.io/projected/33a592b6-e1fb-412c-8768-b83a79e08f61-kube-api-access-4zvlt\") pod \"community-operators-tfpcn\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") " pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.653225 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-h29m6"] Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.668682 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:15 crc kubenswrapper[4910]: I1125 21:33:15.777850 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.030861 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5ffq9"] Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.073783 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kdgbm"] Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.146938 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rrwnh"] Nov 25 21:33:16 crc kubenswrapper[4910]: E1125 21:33:16.184934 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b5ec6d7_57b0_4082_9310_a18457ea9c36.slice/crio-36b8a649a3e7ed02d84475c333f51423b8c8b2b7d60117b5cd8fcf741b9e6c4b.scope\": RecentStats: unable to find data in memory cache]" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.193893 4910 patch_prober.go:28] interesting pod/router-default-5444994796-cfjc4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 21:33:16 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld Nov 25 21:33:16 crc kubenswrapper[4910]: [+]process-running ok Nov 25 21:33:16 crc kubenswrapper[4910]: healthz check failed Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.193945 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-cfjc4" podUID="06539de3-ec9c-42dd-b5cb-c23227463dba" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.193899 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-w78cp" event={"ID":"769629cb-29e2-4d73-8628-c8ee04bd9040","Type":"ContainerStarted","Data":"786c034eccde4a6dcce44ee6dbbe0169e82fc952961da6c8834bcb2938559f58"} Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.195340 4910 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" event={"ID":"3f10744e-fb73-4689-979b-59c32ba0ae6a","Type":"ContainerStarted","Data":"e66ff8f6d6148a9a78004bea2eac7d6c36acd6f388edc1e42c7092b64cbce92b"} Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.204054 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h29m6" event={"ID":"0b5ec6d7-57b0-4082-9310-a18457ea9c36","Type":"ContainerStarted","Data":"36b8a649a3e7ed02d84475c333f51423b8c8b2b7d60117b5cd8fcf741b9e6c4b"} Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.204100 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h29m6" event={"ID":"0b5ec6d7-57b0-4082-9310-a18457ea9c36","Type":"ContainerStarted","Data":"4872b33c8aaf0f41daa2aced35e06454a8afc2148437f34f1e104b48bd55c519"} Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.206745 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.211053 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ffq9" event={"ID":"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c","Type":"ContainerStarted","Data":"11676b0cc3a89aca6956c72aa6469d1464b9a2e349851080abe07748d7e6b04d"} Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.221467 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-w78cp" podStartSLOduration=11.221448295 podStartE2EDuration="11.221448295s" podCreationTimestamp="2025-11-25 21:33:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:16.219757222 +0000 UTC m=+151.682233554" watchObservedRunningTime="2025-11-25 21:33:16.221448295 +0000 UTC m=+151.683924617" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.253801 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.254321 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.270808 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.386967 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tfpcn"] Nov 25 21:33:16 crc kubenswrapper[4910]: W1125 21:33:16.499908 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod33a592b6_e1fb_412c_8768_b83a79e08f61.slice/crio-1d9f176b3969dd7d45a690d37428842caf35c053e35e3f5f6fbfd98859933d08 WatchSource:0}: Error finding container 1d9f176b3969dd7d45a690d37428842caf35c053e35e3f5f6fbfd98859933d08: Status 404 returned error can't find the container with id 1d9f176b3969dd7d45a690d37428842caf35c053e35e3f5f6fbfd98859933d08 Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.716374 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.717564 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.720782 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.722824 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.734450 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.817207 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.817323 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.894624 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mkr2q"] Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.895890 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mkr2q" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.898181 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.916912 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mkr2q"] Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.919361 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.919433 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.919603 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 21:33:16 crc kubenswrapper[4910]: I1125 21:33:16.949728 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" 
(UniqueName: \"kubernetes.io/projected/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.021627 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r77n9\" (UniqueName: \"kubernetes.io/projected/a49a70fd-643c-46d8-9f89-adf75cc92ca9-kube-api-access-r77n9\") pod \"redhat-marketplace-mkr2q\" (UID: \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") " pod="openshift-marketplace/redhat-marketplace-mkr2q" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.021715 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-utilities\") pod \"redhat-marketplace-mkr2q\" (UID: \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") " pod="openshift-marketplace/redhat-marketplace-mkr2q" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.021748 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-catalog-content\") pod \"redhat-marketplace-mkr2q\" (UID: \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") " pod="openshift-marketplace/redhat-marketplace-mkr2q" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.048000 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.123534 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r77n9\" (UniqueName: \"kubernetes.io/projected/a49a70fd-643c-46d8-9f89-adf75cc92ca9-kube-api-access-r77n9\") pod \"redhat-marketplace-mkr2q\" (UID: \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") " pod="openshift-marketplace/redhat-marketplace-mkr2q" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.123631 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-utilities\") pod \"redhat-marketplace-mkr2q\" (UID: \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") " pod="openshift-marketplace/redhat-marketplace-mkr2q" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.123736 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-catalog-content\") pod \"redhat-marketplace-mkr2q\" (UID: \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") " pod="openshift-marketplace/redhat-marketplace-mkr2q" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.124337 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-utilities\") pod \"redhat-marketplace-mkr2q\" (UID: \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") " pod="openshift-marketplace/redhat-marketplace-mkr2q" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.124607 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-catalog-content\") pod \"redhat-marketplace-mkr2q\" (UID: 
\"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") " pod="openshift-marketplace/redhat-marketplace-mkr2q" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.142035 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r77n9\" (UniqueName: \"kubernetes.io/projected/a49a70fd-643c-46d8-9f89-adf75cc92ca9-kube-api-access-r77n9\") pod \"redhat-marketplace-mkr2q\" (UID: \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") " pod="openshift-marketplace/redhat-marketplace-mkr2q" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.192749 4910 patch_prober.go:28] interesting pod/router-default-5444994796-cfjc4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 21:33:17 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld Nov 25 21:33:17 crc kubenswrapper[4910]: [+]process-running ok Nov 25 21:33:17 crc kubenswrapper[4910]: healthz check failed Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.193035 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-cfjc4" podUID="06539de3-ec9c-42dd-b5cb-c23227463dba" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.213603 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.243055 4910 generic.go:334] "Generic (PLEG): container finished" podID="33a592b6-e1fb-412c-8768-b83a79e08f61" containerID="18f5f4b1758207006544f331ef45146cb88f661770b2389b5efa22525deb101b" exitCode=0 Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.243153 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tfpcn" event={"ID":"33a592b6-e1fb-412c-8768-b83a79e08f61","Type":"ContainerDied","Data":"18f5f4b1758207006544f331ef45146cb88f661770b2389b5efa22525deb101b"} Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.243196 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tfpcn" event={"ID":"33a592b6-e1fb-412c-8768-b83a79e08f61","Type":"ContainerStarted","Data":"1d9f176b3969dd7d45a690d37428842caf35c053e35e3f5f6fbfd98859933d08"} Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.255155 4910 generic.go:334] "Generic (PLEG): container finished" podID="e450be6d-4615-4cdf-8c24-edc01fa86412" containerID="6c098fce5a670f64b853e12769ea01fbfe6af5aa9e0cdf1f062c85bf91e18365" exitCode=0 Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.255366 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rrwnh" event={"ID":"e450be6d-4615-4cdf-8c24-edc01fa86412","Type":"ContainerDied","Data":"6c098fce5a670f64b853e12769ea01fbfe6af5aa9e0cdf1f062c85bf91e18365"} Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.255461 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rrwnh" event={"ID":"e450be6d-4615-4cdf-8c24-edc01fa86412","Type":"ContainerStarted","Data":"0a253d368ad94081f3d382959d4cc29ffe8083a689d850101c3b18e2b738a6b1"} Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.264939 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" event={"ID":"3f10744e-fb73-4689-979b-59c32ba0ae6a","Type":"ContainerStarted","Data":"229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7"} Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.265074 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.271234 4910 generic.go:334] "Generic (PLEG): container finished" podID="fba1f4e8-1272-428d-95eb-7e01208f7b97" containerID="326bc5580b1be5088033ece2c528c2ec47df33c236b165c974770c30749b051c" exitCode=0 Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.271361 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" event={"ID":"fba1f4e8-1272-428d-95eb-7e01208f7b97","Type":"ContainerDied","Data":"326bc5580b1be5088033ece2c528c2ec47df33c236b165c974770c30749b051c"} Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.273009 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mkr2q" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.282735 4910 generic.go:334] "Generic (PLEG): container finished" podID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" containerID="36b8a649a3e7ed02d84475c333f51423b8c8b2b7d60117b5cd8fcf741b9e6c4b" exitCode=0 Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.282857 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h29m6" event={"ID":"0b5ec6d7-57b0-4082-9310-a18457ea9c36","Type":"ContainerDied","Data":"36b8a649a3e7ed02d84475c333f51423b8c8b2b7d60117b5cd8fcf741b9e6c4b"} Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.297335 4910 generic.go:334] "Generic (PLEG): container finished" podID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" containerID="cff3ef328153b2253505b5b04dc06523d33255d27e453ebad175d61f87084c4e" exitCode=0 Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.298168 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ffq9" event={"ID":"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c","Type":"ContainerDied","Data":"cff3ef328153b2253505b5b04dc06523d33255d27e453ebad175d61f87084c4e"} Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.302687 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-24zsr"] Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.309616 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.309759 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.310104 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-k7kph" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.326492 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-24zsr"] Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.352972 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" podStartSLOduration=132.352949647 podStartE2EDuration="2m12.352949647s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:17.347551971 +0000 UTC m=+152.810028293" watchObservedRunningTime="2025-11-25 21:33:17.352949647 +0000 UTC m=+152.815426139" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.385474 4910 patch_prober.go:28] interesting pod/downloads-7954f5f757-4hf2m container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.385511 4910 patch_prober.go:28] interesting pod/downloads-7954f5f757-4hf2m container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" start-of-body= Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.385548 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4hf2m" podUID="761b3955-fd92-419a-934c-31f294bbecde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.385602 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-4hf2m" podUID="761b3955-fd92-419a-934c-31f294bbecde" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.10:8080/\": dial tcp 10.217.0.10:8080: connect: connection refused" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.429191 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5f96j\" (UniqueName: \"kubernetes.io/projected/7d960cd2-2535-4ce4-b977-a7c936d956f2-kube-api-access-5f96j\") pod \"redhat-marketplace-24zsr\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.429340 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-utilities\") pod \"redhat-marketplace-24zsr\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.429374 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-catalog-content\") pod 
\"redhat-marketplace-24zsr\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.493789 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.493829 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.518438 4910 patch_prober.go:28] interesting pod/console-f9d7485db-shg9w container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.26:8443/health\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.518832 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-shg9w" podUID="fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" containerName="console" probeResult="failure" output="Get \"https://10.217.0.26:8443/health\": dial tcp 10.217.0.26:8443: connect: connection refused" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.542694 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-utilities\") pod \"redhat-marketplace-24zsr\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.542727 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-catalog-content\") pod \"redhat-marketplace-24zsr\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.542824 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5f96j\" (UniqueName: \"kubernetes.io/projected/7d960cd2-2535-4ce4-b977-a7c936d956f2-kube-api-access-5f96j\") pod \"redhat-marketplace-24zsr\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.543976 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-utilities\") pod \"redhat-marketplace-24zsr\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.545209 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-catalog-content\") pod \"redhat-marketplace-24zsr\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.570363 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5f96j\" (UniqueName: \"kubernetes.io/projected/7d960cd2-2535-4ce4-b977-a7c936d956f2-kube-api-access-5f96j\") pod \"redhat-marketplace-24zsr\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:33:17 crc 
kubenswrapper[4910]: I1125 21:33:17.634534 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.710607 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mkr2q"] Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.898526 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xj7n6"] Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.938532 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.938396 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xj7n6"] Nov 25 21:33:17 crc kubenswrapper[4910]: I1125 21:33:17.941710 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.009131 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-24zsr"] Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.050327 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-catalog-content\") pod \"redhat-operators-xj7n6\" (UID: \"3e78dffa-f782-4a5e-a76c-d090263ad82e\") " pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.050624 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-utilities\") pod \"redhat-operators-xj7n6\" (UID: \"3e78dffa-f782-4a5e-a76c-d090263ad82e\") " pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.050658 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzt4z\" (UniqueName: \"kubernetes.io/projected/3e78dffa-f782-4a5e-a76c-d090263ad82e-kube-api-access-tzt4z\") pod \"redhat-operators-xj7n6\" (UID: \"3e78dffa-f782-4a5e-a76c-d090263ad82e\") " pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.151967 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-utilities\") pod \"redhat-operators-xj7n6\" (UID: \"3e78dffa-f782-4a5e-a76c-d090263ad82e\") " pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.152394 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzt4z\" (UniqueName: \"kubernetes.io/projected/3e78dffa-f782-4a5e-a76c-d090263ad82e-kube-api-access-tzt4z\") pod \"redhat-operators-xj7n6\" (UID: \"3e78dffa-f782-4a5e-a76c-d090263ad82e\") " pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.152468 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-catalog-content\") pod \"redhat-operators-xj7n6\" (UID: 
\"3e78dffa-f782-4a5e-a76c-d090263ad82e\") " pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.152954 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-catalog-content\") pod \"redhat-operators-xj7n6\" (UID: \"3e78dffa-f782-4a5e-a76c-d090263ad82e\") " pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.153275 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-utilities\") pod \"redhat-operators-xj7n6\" (UID: \"3e78dffa-f782-4a5e-a76c-d090263ad82e\") " pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.187551 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.192231 4910 patch_prober.go:28] interesting pod/router-default-5444994796-cfjc4 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 25 21:33:18 crc kubenswrapper[4910]: [-]has-synced failed: reason withheld Nov 25 21:33:18 crc kubenswrapper[4910]: [+]process-running ok Nov 25 21:33:18 crc kubenswrapper[4910]: healthz check failed Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.192329 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-cfjc4" podUID="06539de3-ec9c-42dd-b5cb-c23227463dba" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.197084 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzt4z\" (UniqueName: \"kubernetes.io/projected/3e78dffa-f782-4a5e-a76c-d090263ad82e-kube-api-access-tzt4z\") pod \"redhat-operators-xj7n6\" (UID: \"3e78dffa-f782-4a5e-a76c-d090263ad82e\") " pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.245604 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.309317 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-l9n7q"] Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.310693 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.311391 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.341005 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l9n7q"] Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.344807 4910 generic.go:334] "Generic (PLEG): container finished" podID="7d960cd2-2535-4ce4-b977-a7c936d956f2" containerID="4e5fb999e005e8035bd4ebd001025a7701f5d9db5a01345d5bd63d54825879bd" exitCode=0 Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.344922 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24zsr" event={"ID":"7d960cd2-2535-4ce4-b977-a7c936d956f2","Type":"ContainerDied","Data":"4e5fb999e005e8035bd4ebd001025a7701f5d9db5a01345d5bd63d54825879bd"} Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.344956 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24zsr" event={"ID":"7d960cd2-2535-4ce4-b977-a7c936d956f2","Type":"ContainerStarted","Data":"b4cd2fea8626639fa1c0499226e8831bab846e28bcdc1ed7e3f68a1e7532ec70"} Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.365047 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-catalog-content\") pod \"redhat-operators-l9n7q\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.365148 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-utilities\") pod \"redhat-operators-l9n7q\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.365201 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sv44b\" (UniqueName: \"kubernetes.io/projected/e3d90515-9230-46a3-9b1b-3f629346af0b-kube-api-access-sv44b\") pod \"redhat-operators-l9n7q\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.382444 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8","Type":"ContainerStarted","Data":"065b6694be7b696b932fe37c38304f281d27ffedc817951af86ebcd2deb94633"} Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.382485 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8","Type":"ContainerStarted","Data":"8f0db2cfc765b0229a1695a5aba32d94566e655d0141c6df2ceec03ace3b3125"} Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.398797 4910 generic.go:334] "Generic (PLEG): container finished" podID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerID="84b5d2ad0554363f3b09f340e8de430c715d0de4de6b1f71b1c055c1cb18e4d4" exitCode=0 Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.399302 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkr2q" 
event={"ID":"a49a70fd-643c-46d8-9f89-adf75cc92ca9","Type":"ContainerDied","Data":"84b5d2ad0554363f3b09f340e8de430c715d0de4de6b1f71b1c055c1cb18e4d4"} Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.399361 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkr2q" event={"ID":"a49a70fd-643c-46d8-9f89-adf75cc92ca9","Type":"ContainerStarted","Data":"5ce4f8234dded60ad6ca946ec354f2aa463a4b2e591679c57e38623ab37752b7"} Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.465296 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.465270338 podStartE2EDuration="2.465270338s" podCreationTimestamp="2025-11-25 21:33:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:18.428538514 +0000 UTC m=+153.891014836" watchObservedRunningTime="2025-11-25 21:33:18.465270338 +0000 UTC m=+153.927746660" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.466549 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-utilities\") pod \"redhat-operators-l9n7q\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.466631 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sv44b\" (UniqueName: \"kubernetes.io/projected/e3d90515-9230-46a3-9b1b-3f629346af0b-kube-api-access-sv44b\") pod \"redhat-operators-l9n7q\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.466788 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-catalog-content\") pod \"redhat-operators-l9n7q\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.497322 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-utilities\") pod \"redhat-operators-l9n7q\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.497796 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-catalog-content\") pod \"redhat-operators-l9n7q\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.506085 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sv44b\" (UniqueName: \"kubernetes.io/projected/e3d90515-9230-46a3-9b1b-3f629346af0b-kube-api-access-sv44b\") pod \"redhat-operators-l9n7q\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.647445 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.690176 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.696211 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.699472 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.699764 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.717641 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.778094 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a546c533-1811-487b-8d7b-35bf4aa7140e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a546c533-1811-487b-8d7b-35bf4aa7140e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.778138 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a546c533-1811-487b-8d7b-35bf4aa7140e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a546c533-1811-487b-8d7b-35bf4aa7140e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.879349 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a546c533-1811-487b-8d7b-35bf4aa7140e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a546c533-1811-487b-8d7b-35bf4aa7140e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.879452 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a546c533-1811-487b-8d7b-35bf4aa7140e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a546c533-1811-487b-8d7b-35bf4aa7140e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.880079 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a546c533-1811-487b-8d7b-35bf4aa7140e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"a546c533-1811-487b-8d7b-35bf4aa7140e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.887523 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.917005 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a546c533-1811-487b-8d7b-35bf4aa7140e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"a546c533-1811-487b-8d7b-35bf4aa7140e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.970703 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-l9n7q"] Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.982618 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fba1f4e8-1272-428d-95eb-7e01208f7b97-secret-volume\") pod \"fba1f4e8-1272-428d-95eb-7e01208f7b97\" (UID: \"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.982809 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qlz2m\" (UniqueName: \"kubernetes.io/projected/fba1f4e8-1272-428d-95eb-7e01208f7b97-kube-api-access-qlz2m\") pod \"fba1f4e8-1272-428d-95eb-7e01208f7b97\" (UID: \"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.982839 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fba1f4e8-1272-428d-95eb-7e01208f7b97-config-volume\") pod \"fba1f4e8-1272-428d-95eb-7e01208f7b97\" (UID: \"fba1f4e8-1272-428d-95eb-7e01208f7b97\") " Nov 25 21:33:18 crc kubenswrapper[4910]: I1125 21:33:18.984064 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fba1f4e8-1272-428d-95eb-7e01208f7b97-config-volume" (OuterVolumeSpecName: "config-volume") pod "fba1f4e8-1272-428d-95eb-7e01208f7b97" (UID: "fba1f4e8-1272-428d-95eb-7e01208f7b97"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:33:18 crc kubenswrapper[4910]: W1125 21:33:18.998817 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode3d90515_9230_46a3_9b1b_3f629346af0b.slice/crio-700087422dd2ea142e48c78ecee839fb0693f236622c8e334cb53c3f6c7ee920 WatchSource:0}: Error finding container 700087422dd2ea142e48c78ecee839fb0693f236622c8e334cb53c3f6c7ee920: Status 404 returned error can't find the container with id 700087422dd2ea142e48c78ecee839fb0693f236622c8e334cb53c3f6c7ee920 Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.006768 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fba1f4e8-1272-428d-95eb-7e01208f7b97-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "fba1f4e8-1272-428d-95eb-7e01208f7b97" (UID: "fba1f4e8-1272-428d-95eb-7e01208f7b97"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.007680 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fba1f4e8-1272-428d-95eb-7e01208f7b97-kube-api-access-qlz2m" (OuterVolumeSpecName: "kube-api-access-qlz2m") pod "fba1f4e8-1272-428d-95eb-7e01208f7b97" (UID: "fba1f4e8-1272-428d-95eb-7e01208f7b97"). InnerVolumeSpecName "kube-api-access-qlz2m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.061501 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.086448 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qlz2m\" (UniqueName: \"kubernetes.io/projected/fba1f4e8-1272-428d-95eb-7e01208f7b97-kube-api-access-qlz2m\") on node \"crc\" DevicePath \"\"" Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.086476 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/fba1f4e8-1272-428d-95eb-7e01208f7b97-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.086486 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/fba1f4e8-1272-428d-95eb-7e01208f7b97-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.110147 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xj7n6"] Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.198060 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.202407 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-cfjc4" Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.438826 4910 generic.go:334] "Generic (PLEG): container finished" podID="2d3a577f-7a7a-4bad-96ce-1ad95f68aea8" containerID="065b6694be7b696b932fe37c38304f281d27ffedc817951af86ebcd2deb94633" exitCode=0 Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.438893 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8","Type":"ContainerDied","Data":"065b6694be7b696b932fe37c38304f281d27ffedc817951af86ebcd2deb94633"} Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.445087 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9n7q" event={"ID":"e3d90515-9230-46a3-9b1b-3f629346af0b","Type":"ContainerStarted","Data":"700087422dd2ea142e48c78ecee839fb0693f236622c8e334cb53c3f6c7ee920"} Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.449596 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.449566 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx" event={"ID":"fba1f4e8-1272-428d-95eb-7e01208f7b97","Type":"ContainerDied","Data":"f97125295f6807118c6ad9197dc2e7df3c5083f7eace9bb58a2ee4c691aab985"} Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.449712 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f97125295f6807118c6ad9197dc2e7df3c5083f7eace9bb58a2ee4c691aab985" Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.458291 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xj7n6" event={"ID":"3e78dffa-f782-4a5e-a76c-d090263ad82e","Type":"ContainerStarted","Data":"a2c6e692ddd207a718332ad0e1e0729148bd48ede90277b4be1fafb2391210aa"} Nov 25 21:33:19 crc kubenswrapper[4910]: I1125 21:33:19.603369 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 25 21:33:20 crc kubenswrapper[4910]: I1125 21:33:20.490567 4910 generic.go:334] "Generic (PLEG): container finished" podID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerID="50be094ef8501c1f6199c7d52f5f6ca9a7d14e7e4de65e60c7b0abb9ab595869" exitCode=0 Nov 25 21:33:20 crc kubenswrapper[4910]: I1125 21:33:20.490894 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xj7n6" event={"ID":"3e78dffa-f782-4a5e-a76c-d090263ad82e","Type":"ContainerDied","Data":"50be094ef8501c1f6199c7d52f5f6ca9a7d14e7e4de65e60c7b0abb9ab595869"} Nov 25 21:33:20 crc kubenswrapper[4910]: I1125 21:33:20.500505 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a546c533-1811-487b-8d7b-35bf4aa7140e","Type":"ContainerStarted","Data":"a02e3a2d947cdaf6d98ed17736b1eb52caad6469c9fee981f5a3663acaaf1d75"} Nov 25 21:33:20 crc kubenswrapper[4910]: I1125 21:33:20.500572 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a546c533-1811-487b-8d7b-35bf4aa7140e","Type":"ContainerStarted","Data":"72f1bf2b69f5b40e114a345707e7895bbb51cd155eb7ebcfe6e133f6e60f765e"} Nov 25 21:33:20 crc kubenswrapper[4910]: I1125 21:33:20.506689 4910 generic.go:334] "Generic (PLEG): container finished" podID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerID="638609d4b1db8d7949be7ed06753ad33fc6168589addb1eee36d1691b8d2ea7e" exitCode=0 Nov 25 21:33:20 crc kubenswrapper[4910]: I1125 21:33:20.507094 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9n7q" event={"ID":"e3d90515-9230-46a3-9b1b-3f629346af0b","Type":"ContainerDied","Data":"638609d4b1db8d7949be7ed06753ad33fc6168589addb1eee36d1691b8d2ea7e"} Nov 25 21:33:20 crc kubenswrapper[4910]: I1125 21:33:20.559948 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.559922106 podStartE2EDuration="2.559922106s" podCreationTimestamp="2025-11-25 21:33:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:20.55584729 +0000 UTC m=+156.018323612" watchObservedRunningTime="2025-11-25 21:33:20.559922106 +0000 UTC m=+156.022398428" Nov 25 21:33:21 crc 
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.076293 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.143826 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kubelet-dir\") pod \"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8\" (UID: \"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8\") "
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.144065 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kube-api-access\") pod \"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8\" (UID: \"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8\") "
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.146286 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2d3a577f-7a7a-4bad-96ce-1ad95f68aea8" (UID: "2d3a577f-7a7a-4bad-96ce-1ad95f68aea8"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.156700 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2d3a577f-7a7a-4bad-96ce-1ad95f68aea8" (UID: "2d3a577f-7a7a-4bad-96ce-1ad95f68aea8"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.246643 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.246676 4910 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2d3a577f-7a7a-4bad-96ce-1ad95f68aea8-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.538089 4910 generic.go:334] "Generic (PLEG): container finished" podID="a546c533-1811-487b-8d7b-35bf4aa7140e" containerID="a02e3a2d947cdaf6d98ed17736b1eb52caad6469c9fee981f5a3663acaaf1d75" exitCode=0
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.538280 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a546c533-1811-487b-8d7b-35bf4aa7140e","Type":"ContainerDied","Data":"a02e3a2d947cdaf6d98ed17736b1eb52caad6469c9fee981f5a3663acaaf1d75"}
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.549517 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2d3a577f-7a7a-4bad-96ce-1ad95f68aea8","Type":"ContainerDied","Data":"8f0db2cfc765b0229a1695a5aba32d94566e655d0141c6df2ceec03ace3b3125"}
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.549564 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8f0db2cfc765b0229a1695a5aba32d94566e655d0141c6df2ceec03ace3b3125"
Nov 25 21:33:21 crc kubenswrapper[4910]: I1125 21:33:21.549630 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.013977 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.100682 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a546c533-1811-487b-8d7b-35bf4aa7140e-kube-api-access\") pod \"a546c533-1811-487b-8d7b-35bf4aa7140e\" (UID: \"a546c533-1811-487b-8d7b-35bf4aa7140e\") "
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.100793 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a546c533-1811-487b-8d7b-35bf4aa7140e-kubelet-dir\") pod \"a546c533-1811-487b-8d7b-35bf4aa7140e\" (UID: \"a546c533-1811-487b-8d7b-35bf4aa7140e\") "
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.101755 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.101842 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.102389 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a546c533-1811-487b-8d7b-35bf4aa7140e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a546c533-1811-487b-8d7b-35bf4aa7140e" (UID: "a546c533-1811-487b-8d7b-35bf4aa7140e"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.126148 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a546c533-1811-487b-8d7b-35bf4aa7140e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a546c533-1811-487b-8d7b-35bf4aa7140e" (UID: "a546c533-1811-487b-8d7b-35bf4aa7140e"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.203451 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a546c533-1811-487b-8d7b-35bf4aa7140e-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.203500 4910 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a546c533-1811-487b-8d7b-35bf4aa7140e-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.279876 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-qmbhh"
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.594677 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"a546c533-1811-487b-8d7b-35bf4aa7140e","Type":"ContainerDied","Data":"72f1bf2b69f5b40e114a345707e7895bbb51cd155eb7ebcfe6e133f6e60f765e"}
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.594728 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72f1bf2b69f5b40e114a345707e7895bbb51cd155eb7ebcfe6e133f6e60f765e"
Nov 25 21:33:23 crc kubenswrapper[4910]: I1125 21:33:23.594847 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 25 21:33:27 crc kubenswrapper[4910]: I1125 21:33:27.404949 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-4hf2m"
Nov 25 21:33:27 crc kubenswrapper[4910]: I1125 21:33:27.549690 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-shg9w"
Nov 25 21:33:27 crc kubenswrapper[4910]: I1125 21:33:27.554375 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-shg9w"
Nov 25 21:33:28 crc kubenswrapper[4910]: I1125 21:33:28.911853 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p"
Nov 25 21:33:28 crc kubenswrapper[4910]: I1125 21:33:28.918956 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/72d787b6-8fd2-4a83-9e8f-2654fdad81c9-metrics-certs\") pod \"network-metrics-daemon-m4q5p\" (UID: \"72d787b6-8fd2-4a83-9e8f-2654fdad81c9\") " pod="openshift-multus/network-metrics-daemon-m4q5p"
Need to start a new one" pod="openshift-multus/network-metrics-daemon-m4q5p" Nov 25 21:33:35 crc kubenswrapper[4910]: I1125 21:33:35.601962 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.399226 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-m4q5p"] Nov 25 21:33:44 crc kubenswrapper[4910]: W1125 21:33:44.464330 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod72d787b6_8fd2_4a83_9e8f_2654fdad81c9.slice/crio-15b38e30136018b3ab62dadc66529e23c333851363e9dd25b27f4285e5503206 WatchSource:0}: Error finding container 15b38e30136018b3ab62dadc66529e23c333851363e9dd25b27f4285e5503206: Status 404 returned error can't find the container with id 15b38e30136018b3ab62dadc66529e23c333851363e9dd25b27f4285e5503206 Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.756563 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9n7q" event={"ID":"e3d90515-9230-46a3-9b1b-3f629346af0b","Type":"ContainerStarted","Data":"f94834024a51fdfad2c6a2b968f0de0bd117d297b7363f98b695068800a2e2c0"} Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.759189 4910 generic.go:334] "Generic (PLEG): container finished" podID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" containerID="18b2f9393b35207928289eb9d3b2672b66a00642e9da3c476c05bdbcf27d2888" exitCode=0 Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.759258 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h29m6" event={"ID":"0b5ec6d7-57b0-4082-9310-a18457ea9c36","Type":"ContainerDied","Data":"18b2f9393b35207928289eb9d3b2672b66a00642e9da3c476c05bdbcf27d2888"} Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.761696 4910 generic.go:334] "Generic (PLEG): container finished" podID="7d960cd2-2535-4ce4-b977-a7c936d956f2" containerID="036d58be4be700919b655b4f4a397171449d3a0e281cf0a6c822e775f2549116" exitCode=0 Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.761741 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24zsr" event={"ID":"7d960cd2-2535-4ce4-b977-a7c936d956f2","Type":"ContainerDied","Data":"036d58be4be700919b655b4f4a397171449d3a0e281cf0a6c822e775f2549116"} Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.764890 4910 generic.go:334] "Generic (PLEG): container finished" podID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" containerID="3f171b7a81e199032665f7e2908fe69b1fb829a9c3b6e9715fec07b3ad2a36a5" exitCode=0 Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.764967 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ffq9" event={"ID":"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c","Type":"ContainerDied","Data":"3f171b7a81e199032665f7e2908fe69b1fb829a9c3b6e9715fec07b3ad2a36a5"} Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.770898 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xj7n6" event={"ID":"3e78dffa-f782-4a5e-a76c-d090263ad82e","Type":"ContainerStarted","Data":"b3c344018e3a117c0ee651ab6cdc9dfe469146c0278b99115f9abde0cd2d060f"} Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.772057 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" 
event={"ID":"72d787b6-8fd2-4a83-9e8f-2654fdad81c9","Type":"ContainerStarted","Data":"15b38e30136018b3ab62dadc66529e23c333851363e9dd25b27f4285e5503206"} Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.773375 4910 generic.go:334] "Generic (PLEG): container finished" podID="33a592b6-e1fb-412c-8768-b83a79e08f61" containerID="93a27d61c0e0a18027c16125df79f109854a4487926e0c6c2deb6771f7346792" exitCode=0 Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.773422 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tfpcn" event={"ID":"33a592b6-e1fb-412c-8768-b83a79e08f61","Type":"ContainerDied","Data":"93a27d61c0e0a18027c16125df79f109854a4487926e0c6c2deb6771f7346792"} Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.775341 4910 generic.go:334] "Generic (PLEG): container finished" podID="e450be6d-4615-4cdf-8c24-edc01fa86412" containerID="f2b945e554c63aa2d5a986eeec88af98a863815e033a7f97c4d5cd1e7348d1df" exitCode=0 Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.775395 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rrwnh" event={"ID":"e450be6d-4615-4cdf-8c24-edc01fa86412","Type":"ContainerDied","Data":"f2b945e554c63aa2d5a986eeec88af98a863815e033a7f97c4d5cd1e7348d1df"} Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.783191 4910 generic.go:334] "Generic (PLEG): container finished" podID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerID="1d40fb3fdc6ca0028f92694b5239f6b811737ee70d9d73109afc7b677f8ff8b5" exitCode=0 Nov 25 21:33:44 crc kubenswrapper[4910]: I1125 21:33:44.783257 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkr2q" event={"ID":"a49a70fd-643c-46d8-9f89-adf75cc92ca9","Type":"ContainerDied","Data":"1d40fb3fdc6ca0028f92694b5239f6b811737ee70d9d73109afc7b677f8ff8b5"} Nov 25 21:33:45 crc kubenswrapper[4910]: I1125 21:33:45.715746 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jqcq6"] Nov 25 21:33:45 crc kubenswrapper[4910]: I1125 21:33:45.792490 4910 generic.go:334] "Generic (PLEG): container finished" podID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerID="b3c344018e3a117c0ee651ab6cdc9dfe469146c0278b99115f9abde0cd2d060f" exitCode=0 Nov 25 21:33:45 crc kubenswrapper[4910]: I1125 21:33:45.792566 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xj7n6" event={"ID":"3e78dffa-f782-4a5e-a76c-d090263ad82e","Type":"ContainerDied","Data":"b3c344018e3a117c0ee651ab6cdc9dfe469146c0278b99115f9abde0cd2d060f"} Nov 25 21:33:45 crc kubenswrapper[4910]: I1125 21:33:45.798227 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" event={"ID":"72d787b6-8fd2-4a83-9e8f-2654fdad81c9","Type":"ContainerStarted","Data":"506ef799fc2065bcbcc73c2a631cdf2eee76d8ab8a98feb253fde4e08da9b905"} Nov 25 21:33:45 crc kubenswrapper[4910]: I1125 21:33:45.798284 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-m4q5p" event={"ID":"72d787b6-8fd2-4a83-9e8f-2654fdad81c9","Type":"ContainerStarted","Data":"af87e284ae3e60ec2a056d46d9fb3a0a9f141a455f81d07eaece30f0e5832b01"} Nov 25 21:33:45 crc kubenswrapper[4910]: I1125 21:33:45.800099 4910 generic.go:334] "Generic (PLEG): container finished" podID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerID="f94834024a51fdfad2c6a2b968f0de0bd117d297b7363f98b695068800a2e2c0" exitCode=0 Nov 25 21:33:45 
crc kubenswrapper[4910]: I1125 21:33:45.800132 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9n7q" event={"ID":"e3d90515-9230-46a3-9b1b-3f629346af0b","Type":"ContainerDied","Data":"f94834024a51fdfad2c6a2b968f0de0bd117d297b7363f98b695068800a2e2c0"} Nov 25 21:33:45 crc kubenswrapper[4910]: I1125 21:33:45.831507 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-m4q5p" podStartSLOduration=160.83149086 podStartE2EDuration="2m40.83149086s" podCreationTimestamp="2025-11-25 21:31:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:33:45.830935753 +0000 UTC m=+181.293412075" watchObservedRunningTime="2025-11-25 21:33:45.83149086 +0000 UTC m=+181.293967182" Nov 25 21:33:47 crc kubenswrapper[4910]: I1125 21:33:47.812669 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24zsr" event={"ID":"7d960cd2-2535-4ce4-b977-a7c936d956f2","Type":"ContainerStarted","Data":"62461a18ea1e605b2e2d0b62baca8e1f123a1875e166ea0ef7dfc0b75f9ad374"} Nov 25 21:33:48 crc kubenswrapper[4910]: I1125 21:33:48.207504 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rclxw" Nov 25 21:33:48 crc kubenswrapper[4910]: I1125 21:33:48.841200 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-24zsr" podStartSLOduration=3.158267929 podStartE2EDuration="31.84117274s" podCreationTimestamp="2025-11-25 21:33:17 +0000 UTC" firstStartedPulling="2025-11-25 21:33:18.398077454 +0000 UTC m=+153.860553766" lastFinishedPulling="2025-11-25 21:33:47.080982255 +0000 UTC m=+182.543458577" observedRunningTime="2025-11-25 21:33:48.836196757 +0000 UTC m=+184.298673079" watchObservedRunningTime="2025-11-25 21:33:48.84117274 +0000 UTC m=+184.303649062" Nov 25 21:33:51 crc kubenswrapper[4910]: I1125 21:33:51.834593 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkr2q" event={"ID":"a49a70fd-643c-46d8-9f89-adf75cc92ca9","Type":"ContainerStarted","Data":"a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063"} Nov 25 21:33:51 crc kubenswrapper[4910]: I1125 21:33:51.837446 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tfpcn" event={"ID":"33a592b6-e1fb-412c-8768-b83a79e08f61","Type":"ContainerStarted","Data":"2c8e621a7a210c9fad645d9838a9b15d5cffe480a3949c7e926dbc92ad94e3a5"} Nov 25 21:33:51 crc kubenswrapper[4910]: I1125 21:33:51.859545 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mkr2q" podStartSLOduration=3.41469188 podStartE2EDuration="35.859527118s" podCreationTimestamp="2025-11-25 21:33:16 +0000 UTC" firstStartedPulling="2025-11-25 21:33:18.418750752 +0000 UTC m=+153.881227064" lastFinishedPulling="2025-11-25 21:33:50.86358598 +0000 UTC m=+186.326062302" observedRunningTime="2025-11-25 21:33:51.857160585 +0000 UTC m=+187.319636907" watchObservedRunningTime="2025-11-25 21:33:51.859527118 +0000 UTC m=+187.322003440" Nov 25 21:33:51 crc kubenswrapper[4910]: I1125 21:33:51.879510 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tfpcn" podStartSLOduration=4.650553949 
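The pod_startup_latency_tracker records above are internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration appears to be that E2E figure with the image-pull window (lastFinishedPulling minus firstStartedPulling) subtracted, which is why pods that pulled nothing (the zeroed 0001-01-01 timestamps) report the two values as identical. A quick check against the redhat-marketplace-24zsr record above, with timestamps truncated to microseconds:

    from datetime import datetime, timezone

    def ts(s):
        return datetime.strptime(s, "%Y-%m-%d %H:%M:%S.%f").replace(tzinfo=timezone.utc)

    created  = ts("2025-11-25 21:33:17.000000")  # podCreationTimestamp (whole seconds in the log)
    first    = ts("2025-11-25 21:33:18.398077")  # firstStartedPulling
    last     = ts("2025-11-25 21:33:47.080982")  # lastFinishedPulling
    observed = ts("2025-11-25 21:33:48.841172")  # watchObservedRunningTime

    e2e = (observed - created).total_seconds()
    slo = e2e - (last - first).total_seconds()
    print(f"E2E ~ {e2e:.3f}s, SLO ~ {slo:.3f}s")
    # -> E2E ~ 31.841s and SLO ~ 3.158s, matching podStartE2EDuration="31.84117274s"
    #    and podStartSLOduration=3.158267929 in the record above.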
podStartE2EDuration="36.879494764s" podCreationTimestamp="2025-11-25 21:33:15 +0000 UTC" firstStartedPulling="2025-11-25 21:33:17.256564332 +0000 UTC m=+152.719040654" lastFinishedPulling="2025-11-25 21:33:49.485505147 +0000 UTC m=+184.947981469" observedRunningTime="2025-11-25 21:33:51.877489192 +0000 UTC m=+187.339965504" watchObservedRunningTime="2025-11-25 21:33:51.879494764 +0000 UTC m=+187.341971086" Nov 25 21:33:53 crc kubenswrapper[4910]: I1125 21:33:53.098968 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:33:53 crc kubenswrapper[4910]: I1125 21:33:53.099324 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:33:53 crc kubenswrapper[4910]: I1125 21:33:53.687103 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 25 21:33:55 crc kubenswrapper[4910]: I1125 21:33:55.669511 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:55 crc kubenswrapper[4910]: I1125 21:33:55.669583 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.231748 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.299403 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.871420 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h29m6" event={"ID":"0b5ec6d7-57b0-4082-9310-a18457ea9c36","Type":"ContainerStarted","Data":"575d0c9c0f7eddd6cbad4f6ff99ec24e7d62cc20dd7f2d360fe3f8917217bbc1"} Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.877581 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ffq9" event={"ID":"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c","Type":"ContainerStarted","Data":"9e450963785ecfac63be7b08126cc1c87702e8f3a4cde44d3a024e4ade7f6fbe"} Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.881114 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xj7n6" event={"ID":"3e78dffa-f782-4a5e-a76c-d090263ad82e","Type":"ContainerStarted","Data":"962dcdd9bcfab281e26eceebd0059c6208c0294e21815433f56d90275f4572a2"} Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.883906 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9n7q" event={"ID":"e3d90515-9230-46a3-9b1b-3f629346af0b","Type":"ContainerStarted","Data":"334457e9527a781a272f2615b3e079755384478dd3d64f2abb84f0b05b83d97d"} Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.888233 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-rrwnh" event={"ID":"e450be6d-4615-4cdf-8c24-edc01fa86412","Type":"ContainerStarted","Data":"a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8"} Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.901761 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-h29m6" podStartSLOduration=4.509729749 podStartE2EDuration="42.90173805s" podCreationTimestamp="2025-11-25 21:33:14 +0000 UTC" firstStartedPulling="2025-11-25 21:33:16.206103251 +0000 UTC m=+151.668579573" lastFinishedPulling="2025-11-25 21:33:54.598111552 +0000 UTC m=+190.060587874" observedRunningTime="2025-11-25 21:33:56.891804353 +0000 UTC m=+192.354280675" watchObservedRunningTime="2025-11-25 21:33:56.90173805 +0000 UTC m=+192.364214372" Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.919756 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-l9n7q" podStartSLOduration=5.713000351 podStartE2EDuration="38.919740325s" podCreationTimestamp="2025-11-25 21:33:18 +0000 UTC" firstStartedPulling="2025-11-25 21:33:20.509159879 +0000 UTC m=+155.971636191" lastFinishedPulling="2025-11-25 21:33:53.715899813 +0000 UTC m=+189.178376165" observedRunningTime="2025-11-25 21:33:56.918633401 +0000 UTC m=+192.381109743" watchObservedRunningTime="2025-11-25 21:33:56.919740325 +0000 UTC m=+192.382216647" Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.977604 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5ffq9" podStartSLOduration=4.483933404 podStartE2EDuration="42.977586021s" podCreationTimestamp="2025-11-25 21:33:14 +0000 UTC" firstStartedPulling="2025-11-25 21:33:17.320868907 +0000 UTC m=+152.783345229" lastFinishedPulling="2025-11-25 21:33:55.814521524 +0000 UTC m=+191.276997846" observedRunningTime="2025-11-25 21:33:56.948236375 +0000 UTC m=+192.410712707" watchObservedRunningTime="2025-11-25 21:33:56.977586021 +0000 UTC m=+192.440062343" Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.987804 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rrwnh" podStartSLOduration=3.725482349 podStartE2EDuration="41.987788696s" podCreationTimestamp="2025-11-25 21:33:15 +0000 UTC" firstStartedPulling="2025-11-25 21:33:17.257494611 +0000 UTC m=+152.719970933" lastFinishedPulling="2025-11-25 21:33:55.519800958 +0000 UTC m=+190.982277280" observedRunningTime="2025-11-25 21:33:56.977866169 +0000 UTC m=+192.440342491" watchObservedRunningTime="2025-11-25 21:33:56.987788696 +0000 UTC m=+192.450265018" Nov 25 21:33:56 crc kubenswrapper[4910]: I1125 21:33:56.991393 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tfpcn"] Nov 25 21:33:57 crc kubenswrapper[4910]: I1125 21:33:57.002429 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xj7n6" podStartSLOduration=4.976962042 podStartE2EDuration="40.002412447s" podCreationTimestamp="2025-11-25 21:33:17 +0000 UTC" firstStartedPulling="2025-11-25 21:33:20.493471525 +0000 UTC m=+155.955947847" lastFinishedPulling="2025-11-25 21:33:55.51892193 +0000 UTC m=+190.981398252" observedRunningTime="2025-11-25 21:33:57.000326613 +0000 UTC m=+192.462802945" watchObservedRunningTime="2025-11-25 21:33:57.002412447 +0000 UTC m=+192.464888769" Nov 25 21:33:57 crc 
Nov 25 21:33:57 crc kubenswrapper[4910]: I1125 21:33:57.273661 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mkr2q"
Nov 25 21:33:57 crc kubenswrapper[4910]: I1125 21:33:57.273739 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mkr2q"
Nov 25 21:33:57 crc kubenswrapper[4910]: I1125 21:33:57.316523 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mkr2q"
Nov 25 21:33:57 crc kubenswrapper[4910]: I1125 21:33:57.636957 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-24zsr"
Nov 25 21:33:57 crc kubenswrapper[4910]: I1125 21:33:57.637016 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-24zsr"
Nov 25 21:33:57 crc kubenswrapper[4910]: I1125 21:33:57.683282 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-24zsr"
Nov 25 21:33:57 crc kubenswrapper[4910]: I1125 21:33:57.895040 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tfpcn" podUID="33a592b6-e1fb-412c-8768-b83a79e08f61" containerName="registry-server" containerID="cri-o://2c8e621a7a210c9fad645d9838a9b15d5cffe480a3949c7e926dbc92ad94e3a5" gracePeriod=2
Nov 25 21:33:57 crc kubenswrapper[4910]: I1125 21:33:57.937716 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mkr2q"
Nov 25 21:33:57 crc kubenswrapper[4910]: I1125 21:33:57.939909 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-24zsr"
Nov 25 21:33:58 crc kubenswrapper[4910]: I1125 21:33:58.311305 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xj7n6"
Nov 25 21:33:58 crc kubenswrapper[4910]: I1125 21:33:58.311490 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xj7n6"
Nov 25 21:33:58 crc kubenswrapper[4910]: I1125 21:33:58.649292 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-l9n7q"
Nov 25 21:33:58 crc kubenswrapper[4910]: I1125 21:33:58.649353 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-l9n7q"
Nov 25 21:33:58 crc kubenswrapper[4910]: I1125 21:33:58.913573 4910 generic.go:334] "Generic (PLEG): container finished" podID="33a592b6-e1fb-412c-8768-b83a79e08f61" containerID="2c8e621a7a210c9fad645d9838a9b15d5cffe480a3949c7e926dbc92ad94e3a5" exitCode=0
Nov 25 21:33:58 crc kubenswrapper[4910]: I1125 21:33:58.913712 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tfpcn" event={"ID":"33a592b6-e1fb-412c-8768-b83a79e08f61","Type":"ContainerDied","Data":"2c8e621a7a210c9fad645d9838a9b15d5cffe480a3949c7e926dbc92ad94e3a5"}
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.101304 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tfpcn"
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.215717 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zvlt\" (UniqueName: \"kubernetes.io/projected/33a592b6-e1fb-412c-8768-b83a79e08f61-kube-api-access-4zvlt\") pod \"33a592b6-e1fb-412c-8768-b83a79e08f61\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") "
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.216140 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-catalog-content\") pod \"33a592b6-e1fb-412c-8768-b83a79e08f61\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") "
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.216183 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-utilities\") pod \"33a592b6-e1fb-412c-8768-b83a79e08f61\" (UID: \"33a592b6-e1fb-412c-8768-b83a79e08f61\") "
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.217173 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-utilities" (OuterVolumeSpecName: "utilities") pod "33a592b6-e1fb-412c-8768-b83a79e08f61" (UID: "33a592b6-e1fb-412c-8768-b83a79e08f61"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.232693 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33a592b6-e1fb-412c-8768-b83a79e08f61-kube-api-access-4zvlt" (OuterVolumeSpecName: "kube-api-access-4zvlt") pod "33a592b6-e1fb-412c-8768-b83a79e08f61" (UID: "33a592b6-e1fb-412c-8768-b83a79e08f61"). InnerVolumeSpecName "kube-api-access-4zvlt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.271121 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "33a592b6-e1fb-412c-8768-b83a79e08f61" (UID: "33a592b6-e1fb-412c-8768-b83a79e08f61"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.317739 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.317776 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/33a592b6-e1fb-412c-8768-b83a79e08f61-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.317786 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zvlt\" (UniqueName: \"kubernetes.io/projected/33a592b6-e1fb-412c-8768-b83a79e08f61-kube-api-access-4zvlt\") on node \"crc\" DevicePath \"\""
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.354857 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-xj7n6" podUID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerName="registry-server" probeResult="failure" output=<
Nov 25 21:33:59 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s
Nov 25 21:33:59 crc kubenswrapper[4910]: >
Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.683517 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-l9n7q" podUID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerName="registry-server" probeResult="failure" output=<
Nov 25 21:33:59 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s
Nov 25 21:33:59 crc kubenswrapper[4910]: >
Need to start a new one" pod="openshift-marketplace/community-operators-tfpcn" Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.925619 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tfpcn" event={"ID":"33a592b6-e1fb-412c-8768-b83a79e08f61","Type":"ContainerDied","Data":"1d9f176b3969dd7d45a690d37428842caf35c053e35e3f5f6fbfd98859933d08"} Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.925672 4910 scope.go:117] "RemoveContainer" containerID="2c8e621a7a210c9fad645d9838a9b15d5cffe480a3949c7e926dbc92ad94e3a5" Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.942604 4910 scope.go:117] "RemoveContainer" containerID="93a27d61c0e0a18027c16125df79f109854a4487926e0c6c2deb6771f7346792" Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.957408 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tfpcn"] Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.960233 4910 scope.go:117] "RemoveContainer" containerID="18f5f4b1758207006544f331ef45146cb88f661770b2389b5efa22525deb101b" Nov 25 21:33:59 crc kubenswrapper[4910]: I1125 21:33:59.963728 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tfpcn"] Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.492616 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 21:34:00 crc kubenswrapper[4910]: E1125 21:34:00.492869 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33a592b6-e1fb-412c-8768-b83a79e08f61" containerName="registry-server" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.492883 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="33a592b6-e1fb-412c-8768-b83a79e08f61" containerName="registry-server" Nov 25 21:34:00 crc kubenswrapper[4910]: E1125 21:34:00.492895 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a546c533-1811-487b-8d7b-35bf4aa7140e" containerName="pruner" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.492900 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a546c533-1811-487b-8d7b-35bf4aa7140e" containerName="pruner" Nov 25 21:34:00 crc kubenswrapper[4910]: E1125 21:34:00.492910 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fba1f4e8-1272-428d-95eb-7e01208f7b97" containerName="collect-profiles" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.492916 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fba1f4e8-1272-428d-95eb-7e01208f7b97" containerName="collect-profiles" Nov 25 21:34:00 crc kubenswrapper[4910]: E1125 21:34:00.492929 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33a592b6-e1fb-412c-8768-b83a79e08f61" containerName="extract-utilities" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.492936 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="33a592b6-e1fb-412c-8768-b83a79e08f61" containerName="extract-utilities" Nov 25 21:34:00 crc kubenswrapper[4910]: E1125 21:34:00.492944 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d3a577f-7a7a-4bad-96ce-1ad95f68aea8" containerName="pruner" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.492950 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d3a577f-7a7a-4bad-96ce-1ad95f68aea8" containerName="pruner" Nov 25 21:34:00 crc kubenswrapper[4910]: E1125 21:34:00.492956 4910 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="33a592b6-e1fb-412c-8768-b83a79e08f61" containerName="extract-content" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.492963 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="33a592b6-e1fb-412c-8768-b83a79e08f61" containerName="extract-content" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.493090 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="fba1f4e8-1272-428d-95eb-7e01208f7b97" containerName="collect-profiles" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.493107 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d3a577f-7a7a-4bad-96ce-1ad95f68aea8" containerName="pruner" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.493116 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="33a592b6-e1fb-412c-8768-b83a79e08f61" containerName="registry-server" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.493125 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a546c533-1811-487b-8d7b-35bf4aa7140e" containerName="pruner" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.493565 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.498073 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.498340 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.505109 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.533496 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.533640 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.635306 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.635414 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.635444 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: 
\"kubernetes.io/host-path/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.652926 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 21:34:00 crc kubenswrapper[4910]: I1125 21:34:00.847403 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 21:34:01 crc kubenswrapper[4910]: I1125 21:34:01.209983 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33a592b6-e1fb-412c-8768-b83a79e08f61" path="/var/lib/kubelet/pods/33a592b6-e1fb-412c-8768-b83a79e08f61/volumes" Nov 25 21:34:01 crc kubenswrapper[4910]: I1125 21:34:01.281007 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 25 21:34:01 crc kubenswrapper[4910]: I1125 21:34:01.389290 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-24zsr"] Nov 25 21:34:01 crc kubenswrapper[4910]: I1125 21:34:01.389754 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-24zsr" podUID="7d960cd2-2535-4ce4-b977-a7c936d956f2" containerName="registry-server" containerID="cri-o://62461a18ea1e605b2e2d0b62baca8e1f123a1875e166ea0ef7dfc0b75f9ad374" gracePeriod=2 Nov 25 21:34:01 crc kubenswrapper[4910]: I1125 21:34:01.947165 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f","Type":"ContainerStarted","Data":"94dc317c69eb59be6db53e5cd61271612fe9e9cdd26fc65d84c0a62635aa53f1"} Nov 25 21:34:02 crc kubenswrapper[4910]: I1125 21:34:02.955102 4910 generic.go:334] "Generic (PLEG): container finished" podID="7d960cd2-2535-4ce4-b977-a7c936d956f2" containerID="62461a18ea1e605b2e2d0b62baca8e1f123a1875e166ea0ef7dfc0b75f9ad374" exitCode=0 Nov 25 21:34:02 crc kubenswrapper[4910]: I1125 21:34:02.955195 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24zsr" event={"ID":"7d960cd2-2535-4ce4-b977-a7c936d956f2","Type":"ContainerDied","Data":"62461a18ea1e605b2e2d0b62baca8e1f123a1875e166ea0ef7dfc0b75f9ad374"} Nov 25 21:34:02 crc kubenswrapper[4910]: I1125 21:34:02.957326 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f","Type":"ContainerStarted","Data":"677e71a137f705764cebf50b5a103d7b2e98a8859ed8ab0ce983fb9a44493adf"} Nov 25 21:34:02 crc kubenswrapper[4910]: I1125 21:34:02.999437 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=2.999416005 podStartE2EDuration="2.999416005s" podCreationTimestamp="2025-11-25 21:34:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:34:02.999295172 +0000 UTC m=+198.461771514" watchObservedRunningTime="2025-11-25 21:34:02.999416005 +0000 UTC m=+198.461892327" Nov 
25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.304774 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.474763 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-catalog-content\") pod \"7d960cd2-2535-4ce4-b977-a7c936d956f2\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.474823 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5f96j\" (UniqueName: \"kubernetes.io/projected/7d960cd2-2535-4ce4-b977-a7c936d956f2-kube-api-access-5f96j\") pod \"7d960cd2-2535-4ce4-b977-a7c936d956f2\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.474892 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-utilities\") pod \"7d960cd2-2535-4ce4-b977-a7c936d956f2\" (UID: \"7d960cd2-2535-4ce4-b977-a7c936d956f2\") " Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.476046 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-utilities" (OuterVolumeSpecName: "utilities") pod "7d960cd2-2535-4ce4-b977-a7c936d956f2" (UID: "7d960cd2-2535-4ce4-b977-a7c936d956f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.490448 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d960cd2-2535-4ce4-b977-a7c936d956f2-kube-api-access-5f96j" (OuterVolumeSpecName: "kube-api-access-5f96j") pod "7d960cd2-2535-4ce4-b977-a7c936d956f2" (UID: "7d960cd2-2535-4ce4-b977-a7c936d956f2"). InnerVolumeSpecName "kube-api-access-5f96j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.497646 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7d960cd2-2535-4ce4-b977-a7c936d956f2" (UID: "7d960cd2-2535-4ce4-b977-a7c936d956f2"). InnerVolumeSpecName "catalog-content". 
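The unmount sequences in this log follow a fixed three-step pattern per volume: "operationExecutor.UnmountVolume started" (reconciler_common.go), then "UnmountVolume.TearDown succeeded" (operation_generator.go), then "Volume detached" (reconciler_common.go again). Grouping the records by volume name makes it easy to audit that every teardown ran to completion; a small sketch over a log in this format, with regexes inferred from the record text above:

    import re
    from collections import defaultdict

    # One regex per phase; `\\"` matches the backslash-escaped quotes
    # that kubelet prints inside quoted messages.
    PHASES = [
        ("started",   re.compile(r'UnmountVolume started for volume \\"([^\\"]+)\\"')),
        ("torn down", re.compile(r'TearDown succeeded .*\(OuterVolumeSpecName: "([^"]+)"\)')),
        ("detached",  re.compile(r'Volume detached for volume \\"([^\\"]+)\\"')),
    ]

    def teardown_progress(lines):
        progress = defaultdict(set)
        for line in lines:
            for phase, rx in PHASES:
                m = rx.search(line)
                if m:
                    progress[m.group(1)].add(phase)
        return progress

    # A fully torn-down volume shows all three phases, e.g.
    # progress["catalog-content"] == {"started", "torn down", "detached"}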
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.576882 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.576953 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5f96j\" (UniqueName: \"kubernetes.io/projected/7d960cd2-2535-4ce4-b977-a7c936d956f2-kube-api-access-5f96j\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.577017 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d960cd2-2535-4ce4-b977-a7c936d956f2-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.964798 4910 generic.go:334] "Generic (PLEG): container finished" podID="a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f" containerID="677e71a137f705764cebf50b5a103d7b2e98a8859ed8ab0ce983fb9a44493adf" exitCode=0 Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.964877 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f","Type":"ContainerDied","Data":"677e71a137f705764cebf50b5a103d7b2e98a8859ed8ab0ce983fb9a44493adf"} Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.966560 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-24zsr" event={"ID":"7d960cd2-2535-4ce4-b977-a7c936d956f2","Type":"ContainerDied","Data":"b4cd2fea8626639fa1c0499226e8831bab846e28bcdc1ed7e3f68a1e7532ec70"} Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.966624 4910 scope.go:117] "RemoveContainer" containerID="62461a18ea1e605b2e2d0b62baca8e1f123a1875e166ea0ef7dfc0b75f9ad374" Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.966780 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-24zsr" Nov 25 21:34:03 crc kubenswrapper[4910]: I1125 21:34:03.987434 4910 scope.go:117] "RemoveContainer" containerID="036d58be4be700919b655b4f4a397171449d3a0e281cf0a6c822e775f2549116" Nov 25 21:34:04 crc kubenswrapper[4910]: I1125 21:34:04.003400 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-24zsr"] Nov 25 21:34:04 crc kubenswrapper[4910]: I1125 21:34:04.004588 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-24zsr"] Nov 25 21:34:04 crc kubenswrapper[4910]: I1125 21:34:04.005569 4910 scope.go:117] "RemoveContainer" containerID="4e5fb999e005e8035bd4ebd001025a7701f5d9db5a01345d5bd63d54825879bd" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.077723 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-h29m6" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.078013 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-h29m6" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.125862 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-h29m6" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.212007 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d960cd2-2535-4ce4-b977-a7c936d956f2" path="/var/lib/kubelet/pods/7d960cd2-2535-4ce4-b977-a7c936d956f2/volumes" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.235186 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.316344 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5ffq9" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.316415 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5ffq9" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.377313 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5ffq9" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.401995 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kube-api-access\") pod \"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f\" (UID: \"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f\") " Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.402156 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kubelet-dir\") pod \"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f\" (UID: \"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f\") " Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.402469 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f" (UID: "a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.409819 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f" (UID: "a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.438533 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rrwnh" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.438597 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rrwnh" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.478156 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rrwnh" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.503864 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.503917 4910 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.981906 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f","Type":"ContainerDied","Data":"94dc317c69eb59be6db53e5cd61271612fe9e9cdd26fc65d84c0a62635aa53f1"} Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.981967 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 25 21:34:05 crc kubenswrapper[4910]: I1125 21:34:05.981970 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="94dc317c69eb59be6db53e5cd61271612fe9e9cdd26fc65d84c0a62635aa53f1" Nov 25 21:34:06 crc kubenswrapper[4910]: I1125 21:34:06.025063 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rrwnh" Nov 25 21:34:06 crc kubenswrapper[4910]: I1125 21:34:06.033875 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-h29m6" Nov 25 21:34:06 crc kubenswrapper[4910]: I1125 21:34:06.049992 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5ffq9" Nov 25 21:34:07 crc kubenswrapper[4910]: I1125 21:34:07.797222 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rrwnh"] Nov 25 21:34:07 crc kubenswrapper[4910]: I1125 21:34:07.992672 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rrwnh" podUID="e450be6d-4615-4cdf-8c24-edc01fa86412" containerName="registry-server" containerID="cri-o://a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8" gracePeriod=2 Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.078457 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 21:34:08 crc kubenswrapper[4910]: E1125 21:34:08.079227 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d960cd2-2535-4ce4-b977-a7c936d956f2" containerName="extract-content" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.079259 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d960cd2-2535-4ce4-b977-a7c936d956f2" containerName="extract-content" Nov 25 21:34:08 crc kubenswrapper[4910]: E1125 21:34:08.079271 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d960cd2-2535-4ce4-b977-a7c936d956f2" containerName="registry-server" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.079278 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d960cd2-2535-4ce4-b977-a7c936d956f2" containerName="registry-server" Nov 25 21:34:08 crc kubenswrapper[4910]: E1125 21:34:08.079291 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d960cd2-2535-4ce4-b977-a7c936d956f2" containerName="extract-utilities" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.079297 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d960cd2-2535-4ce4-b977-a7c936d956f2" containerName="extract-utilities" Nov 25 21:34:08 crc kubenswrapper[4910]: E1125 21:34:08.079306 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f" containerName="pruner" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.079311 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f" containerName="pruner" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.079413 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a00e5d0d-1bd0-49eb-a8a0-81ea0cbaf11f" containerName="pruner" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.079425 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d960cd2-2535-4ce4-b977-a7c936d956f2" containerName="registry-server" Nov 25 
21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.079851 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.084029 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.084212 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.101283 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.168609 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-var-lock\") pod \"installer-9-crc\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.168683 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1790a245-602a-4fe6-b17d-61c4af8359d1-kube-api-access\") pod \"installer-9-crc\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.168758 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-kubelet-dir\") pod \"installer-9-crc\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.269144 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-var-lock\") pod \"installer-9-crc\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.269513 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1790a245-602a-4fe6-b17d-61c4af8359d1-kube-api-access\") pod \"installer-9-crc\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.269362 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-var-lock\") pod \"installer-9-crc\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.269797 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-kubelet-dir\") pod \"installer-9-crc\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.269964 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: 
\"kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-kubelet-dir\") pod \"installer-9-crc\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.292942 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1790a245-602a-4fe6-b17d-61c4af8359d1-kube-api-access\") pod \"installer-9-crc\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.373088 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.419934 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xj7n6" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.481699 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rrwnh" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.518741 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.576812 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2fpv\" (UniqueName: \"kubernetes.io/projected/e450be6d-4615-4cdf-8c24-edc01fa86412-kube-api-access-n2fpv\") pod \"e450be6d-4615-4cdf-8c24-edc01fa86412\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.576994 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-catalog-content\") pod \"e450be6d-4615-4cdf-8c24-edc01fa86412\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.580313 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-utilities\") pod \"e450be6d-4615-4cdf-8c24-edc01fa86412\" (UID: \"e450be6d-4615-4cdf-8c24-edc01fa86412\") " Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.582366 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-utilities" (OuterVolumeSpecName: "utilities") pod "e450be6d-4615-4cdf-8c24-edc01fa86412" (UID: "e450be6d-4615-4cdf-8c24-edc01fa86412"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.585609 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e450be6d-4615-4cdf-8c24-edc01fa86412-kube-api-access-n2fpv" (OuterVolumeSpecName: "kube-api-access-n2fpv") pod "e450be6d-4615-4cdf-8c24-edc01fa86412" (UID: "e450be6d-4615-4cdf-8c24-edc01fa86412"). InnerVolumeSpecName "kube-api-access-n2fpv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.636171 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e450be6d-4615-4cdf-8c24-edc01fa86412" (UID: "e450be6d-4615-4cdf-8c24-edc01fa86412"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.682050 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.682080 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e450be6d-4615-4cdf-8c24-edc01fa86412-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.682091 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2fpv\" (UniqueName: \"kubernetes.io/projected/e450be6d-4615-4cdf-8c24-edc01fa86412-kube-api-access-n2fpv\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.692538 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.746629 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:34:08 crc kubenswrapper[4910]: I1125 21:34:08.907948 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.003486 4910 generic.go:334] "Generic (PLEG): container finished" podID="e450be6d-4615-4cdf-8c24-edc01fa86412" containerID="a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8" exitCode=0 Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.003570 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rrwnh" event={"ID":"e450be6d-4615-4cdf-8c24-edc01fa86412","Type":"ContainerDied","Data":"a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8"} Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.003601 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rrwnh" event={"ID":"e450be6d-4615-4cdf-8c24-edc01fa86412","Type":"ContainerDied","Data":"0a253d368ad94081f3d382959d4cc29ffe8083a689d850101c3b18e2b738a6b1"} Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.003624 4910 scope.go:117] "RemoveContainer" containerID="a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8" Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.003615 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rrwnh" Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.004988 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"1790a245-602a-4fe6-b17d-61c4af8359d1","Type":"ContainerStarted","Data":"3d87d7a05d8725e11047c82ed7f10e9b3f6bb8325a3eca9fa93a2279ea3aa6c5"} Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.034962 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rrwnh"] Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.037060 4910 scope.go:117] "RemoveContainer" containerID="f2b945e554c63aa2d5a986eeec88af98a863815e033a7f97c4d5cd1e7348d1df" Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.041501 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rrwnh"] Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.058194 4910 scope.go:117] "RemoveContainer" containerID="6c098fce5a670f64b853e12769ea01fbfe6af5aa9e0cdf1f062c85bf91e18365" Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.080685 4910 scope.go:117] "RemoveContainer" containerID="a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8" Nov 25 21:34:09 crc kubenswrapper[4910]: E1125 21:34:09.081197 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8\": container with ID starting with a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8 not found: ID does not exist" containerID="a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8" Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.081258 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8"} err="failed to get container status \"a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8\": rpc error: code = NotFound desc = could not find container \"a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8\": container with ID starting with a42745fe7d382afd626c028d11bb7bc4fe0e89c06115df7e8468f68cf84f32d8 not found: ID does not exist" Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.081315 4910 scope.go:117] "RemoveContainer" containerID="f2b945e554c63aa2d5a986eeec88af98a863815e033a7f97c4d5cd1e7348d1df" Nov 25 21:34:09 crc kubenswrapper[4910]: E1125 21:34:09.081974 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2b945e554c63aa2d5a986eeec88af98a863815e033a7f97c4d5cd1e7348d1df\": container with ID starting with f2b945e554c63aa2d5a986eeec88af98a863815e033a7f97c4d5cd1e7348d1df not found: ID does not exist" containerID="f2b945e554c63aa2d5a986eeec88af98a863815e033a7f97c4d5cd1e7348d1df" Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.082011 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2b945e554c63aa2d5a986eeec88af98a863815e033a7f97c4d5cd1e7348d1df"} err="failed to get container status \"f2b945e554c63aa2d5a986eeec88af98a863815e033a7f97c4d5cd1e7348d1df\": rpc error: code = NotFound desc = could not find container \"f2b945e554c63aa2d5a986eeec88af98a863815e033a7f97c4d5cd1e7348d1df\": container with ID starting with f2b945e554c63aa2d5a986eeec88af98a863815e033a7f97c4d5cd1e7348d1df not found: 
ID does not exist" Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.082040 4910 scope.go:117] "RemoveContainer" containerID="6c098fce5a670f64b853e12769ea01fbfe6af5aa9e0cdf1f062c85bf91e18365" Nov 25 21:34:09 crc kubenswrapper[4910]: E1125 21:34:09.082417 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c098fce5a670f64b853e12769ea01fbfe6af5aa9e0cdf1f062c85bf91e18365\": container with ID starting with 6c098fce5a670f64b853e12769ea01fbfe6af5aa9e0cdf1f062c85bf91e18365 not found: ID does not exist" containerID="6c098fce5a670f64b853e12769ea01fbfe6af5aa9e0cdf1f062c85bf91e18365" Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.082444 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c098fce5a670f64b853e12769ea01fbfe6af5aa9e0cdf1f062c85bf91e18365"} err="failed to get container status \"6c098fce5a670f64b853e12769ea01fbfe6af5aa9e0cdf1f062c85bf91e18365\": rpc error: code = NotFound desc = could not find container \"6c098fce5a670f64b853e12769ea01fbfe6af5aa9e0cdf1f062c85bf91e18365\": container with ID starting with 6c098fce5a670f64b853e12769ea01fbfe6af5aa9e0cdf1f062c85bf91e18365 not found: ID does not exist" Nov 25 21:34:09 crc kubenswrapper[4910]: I1125 21:34:09.210969 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e450be6d-4615-4cdf-8c24-edc01fa86412" path="/var/lib/kubelet/pods/e450be6d-4615-4cdf-8c24-edc01fa86412/volumes" Nov 25 21:34:10 crc kubenswrapper[4910]: I1125 21:34:10.015866 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"1790a245-602a-4fe6-b17d-61c4af8359d1","Type":"ContainerStarted","Data":"d2b96ea45a269edd0de939eae3b773be763c0347a84181515ae01dad1723bf8a"} Nov 25 21:34:10 crc kubenswrapper[4910]: I1125 21:34:10.737994 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" podUID="f63a8e93-5656-4edc-9ee5-24314ebf749a" containerName="oauth-openshift" containerID="cri-o://25d97fd1c4f116143560791ce01af7b2ee4823923288042156276eb3ca678b5c" gracePeriod=15 Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.022721 4910 generic.go:334] "Generic (PLEG): container finished" podID="f63a8e93-5656-4edc-9ee5-24314ebf749a" containerID="25d97fd1c4f116143560791ce01af7b2ee4823923288042156276eb3ca678b5c" exitCode=0 Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.022781 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" event={"ID":"f63a8e93-5656-4edc-9ee5-24314ebf749a","Type":"ContainerDied","Data":"25d97fd1c4f116143560791ce01af7b2ee4823923288042156276eb3ca678b5c"} Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.736853 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.767391 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=3.767341309 podStartE2EDuration="3.767341309s" podCreationTimestamp="2025-11-25 21:34:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:34:10.037773654 +0000 UTC m=+205.500249976" watchObservedRunningTime="2025-11-25 21:34:11.767341309 +0000 UTC m=+207.229817631" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.837125 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-router-certs\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.837180 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-ocp-branding-template\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.837206 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-error\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.837270 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-provider-selection\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.837336 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-policies\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.837384 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-flhmw\" (UniqueName: \"kubernetes.io/projected/f63a8e93-5656-4edc-9ee5-24314ebf749a-kube-api-access-flhmw\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.837419 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-serving-cert\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.837451 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-trusted-ca-bundle\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.837481 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-login\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.837524 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-dir\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.837575 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.838970 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-session\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.839086 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-idp-0-file-data\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.839168 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-service-ca\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.839215 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-cliconfig\") pod \"f63a8e93-5656-4edc-9ee5-24314ebf749a\" (UID: \"f63a8e93-5656-4edc-9ee5-24314ebf749a\") " Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.840477 4910 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.840486 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). 
InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.841120 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.840897 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.841670 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.847843 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.853455 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.854091 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f63a8e93-5656-4edc-9ee5-24314ebf749a-kube-api-access-flhmw" (OuterVolumeSpecName: "kube-api-access-flhmw") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "kube-api-access-flhmw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.854668 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.854958 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.855014 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.855398 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.855738 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.857999 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "f63a8e93-5656-4edc-9ee5-24314ebf749a" (UID: "f63a8e93-5656-4edc-9ee5-24314ebf749a"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942428 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942490 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942516 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942537 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942558 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942579 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942602 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942625 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942652 4910 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942671 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-flhmw\" (UniqueName: \"kubernetes.io/projected/f63a8e93-5656-4edc-9ee5-24314ebf749a-kube-api-access-flhmw\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942694 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942715 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:11 crc kubenswrapper[4910]: I1125 21:34:11.942737 4910 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/f63a8e93-5656-4edc-9ee5-24314ebf749a-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:12 crc kubenswrapper[4910]: I1125 21:34:12.029998 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" event={"ID":"f63a8e93-5656-4edc-9ee5-24314ebf749a","Type":"ContainerDied","Data":"3175deeb69168b355e99e1a1eb2b5077f65427ae1595a0fa5277d6f8c7056a8a"} Nov 25 21:34:12 crc kubenswrapper[4910]: I1125 21:34:12.030060 4910 scope.go:117] "RemoveContainer" containerID="25d97fd1c4f116143560791ce01af7b2ee4823923288042156276eb3ca678b5c" Nov 25 21:34:12 crc kubenswrapper[4910]: I1125 21:34:12.030097 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-jqcq6" Nov 25 21:34:12 crc kubenswrapper[4910]: I1125 21:34:12.082729 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jqcq6"] Nov 25 21:34:12 crc kubenswrapper[4910]: I1125 21:34:12.087025 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-jqcq6"] Nov 25 21:34:12 crc kubenswrapper[4910]: I1125 21:34:12.192051 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l9n7q"] Nov 25 21:34:12 crc kubenswrapper[4910]: I1125 21:34:12.192554 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-l9n7q" podUID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerName="registry-server" containerID="cri-o://334457e9527a781a272f2615b3e079755384478dd3d64f2abb84f0b05b83d97d" gracePeriod=2 Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.037995 4910 generic.go:334] "Generic (PLEG): container finished" podID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerID="334457e9527a781a272f2615b3e079755384478dd3d64f2abb84f0b05b83d97d" exitCode=0 Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.038056 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9n7q" event={"ID":"e3d90515-9230-46a3-9b1b-3f629346af0b","Type":"ContainerDied","Data":"334457e9527a781a272f2615b3e079755384478dd3d64f2abb84f0b05b83d97d"} Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.124589 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.160329 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sv44b\" (UniqueName: \"kubernetes.io/projected/e3d90515-9230-46a3-9b1b-3f629346af0b-kube-api-access-sv44b\") pod \"e3d90515-9230-46a3-9b1b-3f629346af0b\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.160398 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-catalog-content\") pod \"e3d90515-9230-46a3-9b1b-3f629346af0b\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.160437 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-utilities\") pod \"e3d90515-9230-46a3-9b1b-3f629346af0b\" (UID: \"e3d90515-9230-46a3-9b1b-3f629346af0b\") " Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.161355 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-utilities" (OuterVolumeSpecName: "utilities") pod "e3d90515-9230-46a3-9b1b-3f629346af0b" (UID: "e3d90515-9230-46a3-9b1b-3f629346af0b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.169441 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3d90515-9230-46a3-9b1b-3f629346af0b-kube-api-access-sv44b" (OuterVolumeSpecName: "kube-api-access-sv44b") pod "e3d90515-9230-46a3-9b1b-3f629346af0b" (UID: "e3d90515-9230-46a3-9b1b-3f629346af0b"). InnerVolumeSpecName "kube-api-access-sv44b". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.217660 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f63a8e93-5656-4edc-9ee5-24314ebf749a" path="/var/lib/kubelet/pods/f63a8e93-5656-4edc-9ee5-24314ebf749a/volumes" Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.260009 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e3d90515-9230-46a3-9b1b-3f629346af0b" (UID: "e3d90515-9230-46a3-9b1b-3f629346af0b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.262526 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.262563 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sv44b\" (UniqueName: \"kubernetes.io/projected/e3d90515-9230-46a3-9b1b-3f629346af0b-kube-api-access-sv44b\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:13 crc kubenswrapper[4910]: I1125 21:34:13.262576 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e3d90515-9230-46a3-9b1b-3f629346af0b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:14 crc kubenswrapper[4910]: I1125 21:34:14.052768 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-l9n7q" event={"ID":"e3d90515-9230-46a3-9b1b-3f629346af0b","Type":"ContainerDied","Data":"700087422dd2ea142e48c78ecee839fb0693f236622c8e334cb53c3f6c7ee920"} Nov 25 21:34:14 crc kubenswrapper[4910]: I1125 21:34:14.052820 4910 scope.go:117] "RemoveContainer" containerID="334457e9527a781a272f2615b3e079755384478dd3d64f2abb84f0b05b83d97d" Nov 25 21:34:14 crc kubenswrapper[4910]: I1125 21:34:14.052920 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-l9n7q" Nov 25 21:34:14 crc kubenswrapper[4910]: I1125 21:34:14.100329 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-l9n7q"] Nov 25 21:34:14 crc kubenswrapper[4910]: I1125 21:34:14.107159 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-l9n7q"] Nov 25 21:34:14 crc kubenswrapper[4910]: I1125 21:34:14.110847 4910 scope.go:117] "RemoveContainer" containerID="f94834024a51fdfad2c6a2b968f0de0bd117d297b7363f98b695068800a2e2c0" Nov 25 21:34:14 crc kubenswrapper[4910]: I1125 21:34:14.133942 4910 scope.go:117] "RemoveContainer" containerID="638609d4b1db8d7949be7ed06753ad33fc6168589addb1eee36d1691b8d2ea7e" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.214138 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3d90515-9230-46a3-9b1b-3f629346af0b" path="/var/lib/kubelet/pods/e3d90515-9230-46a3-9b1b-3f629346af0b/volumes" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.835287 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-578876cb6-lpxg2"] Nov 25 21:34:15 crc kubenswrapper[4910]: E1125 21:34:15.835548 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerName="registry-server" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.835560 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerName="registry-server" Nov 25 21:34:15 crc kubenswrapper[4910]: E1125 21:34:15.835569 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e450be6d-4615-4cdf-8c24-edc01fa86412" containerName="extract-content" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.835574 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e450be6d-4615-4cdf-8c24-edc01fa86412" containerName="extract-content" Nov 25 21:34:15 crc kubenswrapper[4910]: E1125 21:34:15.835586 4910 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e450be6d-4615-4cdf-8c24-edc01fa86412" containerName="registry-server" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.835593 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e450be6d-4615-4cdf-8c24-edc01fa86412" containerName="registry-server" Nov 25 21:34:15 crc kubenswrapper[4910]: E1125 21:34:15.835601 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e450be6d-4615-4cdf-8c24-edc01fa86412" containerName="extract-utilities" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.835614 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e450be6d-4615-4cdf-8c24-edc01fa86412" containerName="extract-utilities" Nov 25 21:34:15 crc kubenswrapper[4910]: E1125 21:34:15.835626 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerName="extract-content" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.835633 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerName="extract-content" Nov 25 21:34:15 crc kubenswrapper[4910]: E1125 21:34:15.835640 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerName="extract-utilities" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.835645 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerName="extract-utilities" Nov 25 21:34:15 crc kubenswrapper[4910]: E1125 21:34:15.835655 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f63a8e93-5656-4edc-9ee5-24314ebf749a" containerName="oauth-openshift" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.835661 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f63a8e93-5656-4edc-9ee5-24314ebf749a" containerName="oauth-openshift" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.835764 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e450be6d-4615-4cdf-8c24-edc01fa86412" containerName="registry-server" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.835784 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f63a8e93-5656-4edc-9ee5-24314ebf749a" containerName="oauth-openshift" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.835794 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3d90515-9230-46a3-9b1b-3f629346af0b" containerName="registry-server" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.836195 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.843985 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.845235 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.845231 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.845453 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.845920 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.846114 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.846275 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.846312 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.846404 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.846593 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.847866 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.849295 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.851867 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-578876cb6-lpxg2"] Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.859524 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.860778 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.864510 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.996911 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-audit-policies\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2" 
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.996972 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-audit-dir\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997004 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-cliconfig\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997032 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997053 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-template-error\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997070 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997123 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997155 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-session\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997173 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-router-certs\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997194 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997217 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-serving-cert\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997235 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9v6n\" (UniqueName: \"kubernetes.io/projected/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-kube-api-access-g9v6n\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997312 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-template-login\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:15 crc kubenswrapper[4910]: I1125 21:34:15.997338 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-service-ca\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098290 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-service-ca\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098348 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-audit-policies\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098377 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-audit-dir\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098393 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-cliconfig\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098414 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098432 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-template-error\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098450 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098522 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098576 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-session\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098599 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-router-certs\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098624 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098657 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-serving-cert\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098687 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9v6n\" (UniqueName: \"kubernetes.io/projected/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-kube-api-access-g9v6n\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.098717 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-template-login\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.099534 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-audit-dir\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.100447 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-service-ca\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.100526 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-audit-policies\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.104860 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-cliconfig\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.105119 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.106127 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-template-login\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.106194 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.106541 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-serving-cert\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.106812 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-user-template-error\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.107403 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.108049 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.108267 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-router-certs\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.109079 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-v4-0-config-system-session\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.120976 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9v6n\" (UniqueName: \"kubernetes.io/projected/506f2f9f-c794-4b7e-84d2-c2c18ff41fe7-kube-api-access-g9v6n\") pod \"oauth-openshift-578876cb6-lpxg2\" (UID: \"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7\") " pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.162331 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:16 crc kubenswrapper[4910]: I1125 21:34:16.355306 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-578876cb6-lpxg2"]
Nov 25 21:34:16 crc kubenswrapper[4910]: W1125 21:34:16.364300 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod506f2f9f_c794_4b7e_84d2_c2c18ff41fe7.slice/crio-ae4648239073de281f8a7e301cecbaef1367259acbfd72ea4c6cb74a3e827b8e WatchSource:0}: Error finding container ae4648239073de281f8a7e301cecbaef1367259acbfd72ea4c6cb74a3e827b8e: Status 404 returned error can't find the container with id ae4648239073de281f8a7e301cecbaef1367259acbfd72ea4c6cb74a3e827b8e
Nov 25 21:34:17 crc kubenswrapper[4910]: I1125 21:34:17.070860 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2" event={"ID":"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7","Type":"ContainerStarted","Data":"0266aa5218012e6df3f95fb85b7703f45e8a114860049b1aa303826ddd80abad"}
Nov 25 21:34:17 crc kubenswrapper[4910]: I1125 21:34:17.071485 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2" event={"ID":"506f2f9f-c794-4b7e-84d2-c2c18ff41fe7","Type":"ContainerStarted","Data":"ae4648239073de281f8a7e301cecbaef1367259acbfd72ea4c6cb74a3e827b8e"}
Nov 25 21:34:17 crc kubenswrapper[4910]: I1125 21:34:17.071506 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:17 crc kubenswrapper[4910]: I1125 21:34:17.078260 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2"
Nov 25 21:34:17 crc kubenswrapper[4910]: I1125 21:34:17.093027 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-578876cb6-lpxg2" podStartSLOduration=32.093009193 podStartE2EDuration="32.093009193s" podCreationTimestamp="2025-11-25 21:33:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:34:17.092460167 +0000 UTC m=+212.554936489" watchObservedRunningTime="2025-11-25 21:34:17.093009193 +0000 UTC m=+212.555485505"
Nov 25 21:34:23 crc kubenswrapper[4910]: I1125 21:34:23.099289 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 21:34:23 crc kubenswrapper[4910]: I1125 21:34:23.099632 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 21:34:23 crc kubenswrapper[4910]: I1125 21:34:23.099688 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t"
Nov 25 21:34:23 crc kubenswrapper[4910]: I1125 21:34:23.100382 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 21:34:23 crc kubenswrapper[4910]: I1125 21:34:23.100456 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547" gracePeriod=600
Nov 25 21:34:24 crc kubenswrapper[4910]: I1125 21:34:24.121670 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547" exitCode=0
Nov 25 21:34:24 crc kubenswrapper[4910]: I1125 21:34:24.121770 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547"}
Nov 25 21:34:24 crc kubenswrapper[4910]: I1125 21:34:24.123129 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"5e4a10520c346a1193c3483b8b384d43f0615f88a77b72ac7b42de74d6a3c5d6"}
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.105218 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h29m6"]
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.106499 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-h29m6" podUID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" containerName="registry-server" containerID="cri-o://575d0c9c0f7eddd6cbad4f6ff99ec24e7d62cc20dd7f2d360fe3f8917217bbc1" gracePeriod=30
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.111708 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5ffq9"]
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.111962 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5ffq9" podUID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" containerName="registry-server" containerID="cri-o://9e450963785ecfac63be7b08126cc1c87702e8f3a4cde44d3a024e4ade7f6fbe" gracePeriod=30
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.126318 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-78rzv"]
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.127298 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" podUID="fb8c2b0c-00aa-406d-abb6-e989dbe3abea" containerName="marketplace-operator" containerID="cri-o://e2930ae664a56c68a5e0c5e1c5e0fd1ff5d53e85aece04c1d436680b0a3fe978" gracePeriod=30
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.139275 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mkr2q"]
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.141522 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mkr2q" podUID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerName="registry-server" containerID="cri-o://a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063" gracePeriod=30
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.145039 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-smb6m"]
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.148942 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.156842 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-smb6m"]
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.160892 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xj7n6"]
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.161311 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-xj7n6" podUID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerName="registry-server" containerID="cri-o://962dcdd9bcfab281e26eceebd0059c6208c0294e21815433f56d90275f4572a2" gracePeriod=30
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.252278 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qsp9\" (UniqueName: \"kubernetes.io/projected/e2af0945-04e5-4220-981f-d7a4892fcf69-kube-api-access-4qsp9\") pod \"marketplace-operator-79b997595-smb6m\" (UID: \"e2af0945-04e5-4220-981f-d7a4892fcf69\") " pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.252316 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e2af0945-04e5-4220-981f-d7a4892fcf69-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-smb6m\" (UID: \"e2af0945-04e5-4220-981f-d7a4892fcf69\") " pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.252340 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e2af0945-04e5-4220-981f-d7a4892fcf69-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-smb6m\" (UID: \"e2af0945-04e5-4220-981f-d7a4892fcf69\") " pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.353603 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qsp9\" (UniqueName: \"kubernetes.io/projected/e2af0945-04e5-4220-981f-d7a4892fcf69-kube-api-access-4qsp9\") pod \"marketplace-operator-79b997595-smb6m\" (UID: \"e2af0945-04e5-4220-981f-d7a4892fcf69\") " pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.354479 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e2af0945-04e5-4220-981f-d7a4892fcf69-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-smb6m\" (UID: \"e2af0945-04e5-4220-981f-d7a4892fcf69\") " pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.354680 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e2af0945-04e5-4220-981f-d7a4892fcf69-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-smb6m\" (UID: \"e2af0945-04e5-4220-981f-d7a4892fcf69\") " pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.421303 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/e2af0945-04e5-4220-981f-d7a4892fcf69-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-smb6m\" (UID: \"e2af0945-04e5-4220-981f-d7a4892fcf69\") " pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.427789 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qsp9\" (UniqueName: \"kubernetes.io/projected/e2af0945-04e5-4220-981f-d7a4892fcf69-kube-api-access-4qsp9\") pod \"marketplace-operator-79b997595-smb6m\" (UID: \"e2af0945-04e5-4220-981f-d7a4892fcf69\") " pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.427879 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e2af0945-04e5-4220-981f-d7a4892fcf69-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-smb6m\" (UID: \"e2af0945-04e5-4220-981f-d7a4892fcf69\") " pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.471707 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:36 crc kubenswrapper[4910]: I1125 21:34:36.913254 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-smb6m"]
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.246429 4910 generic.go:334] "Generic (PLEG): container finished" podID="fb8c2b0c-00aa-406d-abb6-e989dbe3abea" containerID="e2930ae664a56c68a5e0c5e1c5e0fd1ff5d53e85aece04c1d436680b0a3fe978" exitCode=0
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.246636 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" event={"ID":"fb8c2b0c-00aa-406d-abb6-e989dbe3abea","Type":"ContainerDied","Data":"e2930ae664a56c68a5e0c5e1c5e0fd1ff5d53e85aece04c1d436680b0a3fe978"}
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.248518 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-smb6m" event={"ID":"e2af0945-04e5-4220-981f-d7a4892fcf69","Type":"ContainerStarted","Data":"cd9b96999392eaaa59c0a2984b114a4c3625850c0c256377479b64cc8ecb1899"}
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.248544 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-smb6m" event={"ID":"e2af0945-04e5-4220-981f-d7a4892fcf69","Type":"ContainerStarted","Data":"0835d4c933b6b6f4100113f96866799536a4b9234dbce5a05b3246fa0be09620"}
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.249418 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.250217 4910 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-smb6m container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.57:8080/healthz\": dial tcp 10.217.0.57:8080: connect: connection refused" start-of-body=
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.250270 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-smb6m" podUID="e2af0945-04e5-4220-981f-d7a4892fcf69" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.57:8080/healthz\": dial tcp 10.217.0.57:8080: connect: connection refused"
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.252392 4910 generic.go:334] "Generic (PLEG): container finished" podID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerID="a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063" exitCode=0
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.252447 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkr2q" event={"ID":"a49a70fd-643c-46d8-9f89-adf75cc92ca9","Type":"ContainerDied","Data":"a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063"}
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.260693 4910 generic.go:334] "Generic (PLEG): container finished" podID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" containerID="575d0c9c0f7eddd6cbad4f6ff99ec24e7d62cc20dd7f2d360fe3f8917217bbc1" exitCode=0
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.260962 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h29m6" event={"ID":"0b5ec6d7-57b0-4082-9310-a18457ea9c36","Type":"ContainerDied","Data":"575d0c9c0f7eddd6cbad4f6ff99ec24e7d62cc20dd7f2d360fe3f8917217bbc1"}
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.264060 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ffq9" event={"ID":"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c","Type":"ContainerDied","Data":"9e450963785ecfac63be7b08126cc1c87702e8f3a4cde44d3a024e4ade7f6fbe"}
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.263574 4910 generic.go:334] "Generic (PLEG): container finished" podID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" containerID="9e450963785ecfac63be7b08126cc1c87702e8f3a4cde44d3a024e4ade7f6fbe" exitCode=0
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.266458 4910 generic.go:334] "Generic (PLEG): container finished" podID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerID="962dcdd9bcfab281e26eceebd0059c6208c0294e21815433f56d90275f4572a2" exitCode=0
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.266495 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xj7n6" event={"ID":"3e78dffa-f782-4a5e-a76c-d090263ad82e","Type":"ContainerDied","Data":"962dcdd9bcfab281e26eceebd0059c6208c0294e21815433f56d90275f4572a2"}
Nov 25 21:34:37 crc kubenswrapper[4910]: E1125 21:34:37.274601 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063 is running failed: container process not found" containerID="a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 21:34:37 crc kubenswrapper[4910]: E1125 21:34:37.274894 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063 is running failed: container process not found" containerID="a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 21:34:37 crc kubenswrapper[4910]: E1125 21:34:37.275151 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063 is running failed: container process not found" containerID="a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 21:34:37 crc kubenswrapper[4910]: E1125 21:34:37.275338 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-mkr2q" podUID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerName="registry-server"
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.283143 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-smb6m" podStartSLOduration=1.278322182 podStartE2EDuration="1.278322182s" podCreationTimestamp="2025-11-25 21:34:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:34:37.273567244 +0000 UTC m=+232.736043576" watchObservedRunningTime="2025-11-25 21:34:37.278322182 +0000 UTC m=+232.740798504"
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.298509 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv"
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.377839 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-operator-metrics\") pod \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.377882 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-trusted-ca\") pod \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.378000 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljmjk\" (UniqueName: \"kubernetes.io/projected/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-kube-api-access-ljmjk\") pod \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\" (UID: \"fb8c2b0c-00aa-406d-abb6-e989dbe3abea\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.379716 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "fb8c2b0c-00aa-406d-abb6-e989dbe3abea" (UID: "fb8c2b0c-00aa-406d-abb6-e989dbe3abea"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.385627 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-kube-api-access-ljmjk" (OuterVolumeSpecName: "kube-api-access-ljmjk") pod "fb8c2b0c-00aa-406d-abb6-e989dbe3abea" (UID: "fb8c2b0c-00aa-406d-abb6-e989dbe3abea"). InnerVolumeSpecName "kube-api-access-ljmjk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.387382 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "fb8c2b0c-00aa-406d-abb6-e989dbe3abea" (UID: "fb8c2b0c-00aa-406d-abb6-e989dbe3abea"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.407691 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mkr2q"
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.409917 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.414694 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.419364 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xj7n6"
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.478789 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-utilities\") pod \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.478883 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-catalog-content\") pod \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\" (UID: \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.478909 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-utilities\") pod \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\" (UID: \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.478956 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cscks\" (UniqueName: \"kubernetes.io/projected/0b5ec6d7-57b0-4082-9310-a18457ea9c36-kube-api-access-cscks\") pod \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.478973 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-utilities\") pod \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.479055 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzt4z\" (UniqueName: \"kubernetes.io/projected/3e78dffa-f782-4a5e-a76c-d090263ad82e-kube-api-access-tzt4z\") pod \"3e78dffa-f782-4a5e-a76c-d090263ad82e\" (UID: \"3e78dffa-f782-4a5e-a76c-d090263ad82e\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.479087 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-utilities\") pod \"3e78dffa-f782-4a5e-a76c-d090263ad82e\" (UID: \"3e78dffa-f782-4a5e-a76c-d090263ad82e\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.479106 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-catalog-content\") pod \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.479134 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bsfcr\" (UniqueName: \"kubernetes.io/projected/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-kube-api-access-bsfcr\") pod \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\" (UID: \"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.479155 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r77n9\" (UniqueName: \"kubernetes.io/projected/a49a70fd-643c-46d8-9f89-adf75cc92ca9-kube-api-access-r77n9\") pod \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\" (UID: \"a49a70fd-643c-46d8-9f89-adf75cc92ca9\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.479170 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-catalog-content\") pod \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\" (UID: \"0b5ec6d7-57b0-4082-9310-a18457ea9c36\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.479189 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-catalog-content\") pod \"3e78dffa-f782-4a5e-a76c-d090263ad82e\" (UID: \"3e78dffa-f782-4a5e-a76c-d090263ad82e\") "
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.479454 4910 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.479467 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljmjk\" (UniqueName: \"kubernetes.io/projected/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-kube-api-access-ljmjk\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.479476 4910 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/fb8c2b0c-00aa-406d-abb6-e989dbe3abea-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.480465 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-utilities" (OuterVolumeSpecName: "utilities") pod "a49a70fd-643c-46d8-9f89-adf75cc92ca9" (UID: "a49a70fd-643c-46d8-9f89-adf75cc92ca9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.480548 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-utilities" (OuterVolumeSpecName: "utilities") pod "0b5ec6d7-57b0-4082-9310-a18457ea9c36" (UID: "0b5ec6d7-57b0-4082-9310-a18457ea9c36"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.482419 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-utilities" (OuterVolumeSpecName: "utilities") pod "26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" (UID: "26e6b24a-b7f6-4ec9-bf8e-798436d1e43c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.485781 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-utilities" (OuterVolumeSpecName: "utilities") pod "3e78dffa-f782-4a5e-a76c-d090263ad82e" (UID: "3e78dffa-f782-4a5e-a76c-d090263ad82e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.489800 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b5ec6d7-57b0-4082-9310-a18457ea9c36-kube-api-access-cscks" (OuterVolumeSpecName: "kube-api-access-cscks") pod "0b5ec6d7-57b0-4082-9310-a18457ea9c36" (UID: "0b5ec6d7-57b0-4082-9310-a18457ea9c36"). InnerVolumeSpecName "kube-api-access-cscks". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.489896 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-kube-api-access-bsfcr" (OuterVolumeSpecName: "kube-api-access-bsfcr") pod "26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" (UID: "26e6b24a-b7f6-4ec9-bf8e-798436d1e43c"). InnerVolumeSpecName "kube-api-access-bsfcr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.490026 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e78dffa-f782-4a5e-a76c-d090263ad82e-kube-api-access-tzt4z" (OuterVolumeSpecName: "kube-api-access-tzt4z") pod "3e78dffa-f782-4a5e-a76c-d090263ad82e" (UID: "3e78dffa-f782-4a5e-a76c-d090263ad82e"). InnerVolumeSpecName "kube-api-access-tzt4z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.490884 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a49a70fd-643c-46d8-9f89-adf75cc92ca9-kube-api-access-r77n9" (OuterVolumeSpecName: "kube-api-access-r77n9") pod "a49a70fd-643c-46d8-9f89-adf75cc92ca9" (UID: "a49a70fd-643c-46d8-9f89-adf75cc92ca9"). InnerVolumeSpecName "kube-api-access-r77n9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.512897 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a49a70fd-643c-46d8-9f89-adf75cc92ca9" (UID: "a49a70fd-643c-46d8-9f89-adf75cc92ca9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.557041 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" (UID: "26e6b24a-b7f6-4ec9-bf8e-798436d1e43c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.560440 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b5ec6d7-57b0-4082-9310-a18457ea9c36" (UID: "0b5ec6d7-57b0-4082-9310-a18457ea9c36"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.580347 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cscks\" (UniqueName: \"kubernetes.io/projected/0b5ec6d7-57b0-4082-9310-a18457ea9c36-kube-api-access-cscks\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.580378 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.580391 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzt4z\" (UniqueName: \"kubernetes.io/projected/3e78dffa-f782-4a5e-a76c-d090263ad82e-kube-api-access-tzt4z\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.580401 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.580410 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.580419 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bsfcr\" (UniqueName: \"kubernetes.io/projected/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c-kube-api-access-bsfcr\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.580428 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r77n9\" (UniqueName: \"kubernetes.io/projected/a49a70fd-643c-46d8-9f89-adf75cc92ca9-kube-api-access-r77n9\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.580436 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.580445 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b5ec6d7-57b0-4082-9310-a18457ea9c36-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.580454 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.580464 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a49a70fd-643c-46d8-9f89-adf75cc92ca9-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.592502 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3e78dffa-f782-4a5e-a76c-d090263ad82e" (UID: "3e78dffa-f782-4a5e-a76c-d090263ad82e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:34:37 crc kubenswrapper[4910]: I1125 21:34:37.681677 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3e78dffa-f782-4a5e-a76c-d090263ad82e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.273347 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv" event={"ID":"fb8c2b0c-00aa-406d-abb6-e989dbe3abea","Type":"ContainerDied","Data":"e67b97308ff8630ac0d7ef4093f371460010b8a363522540a066ed5c43e93beb"}
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.273661 4910 scope.go:117] "RemoveContainer" containerID="e2930ae664a56c68a5e0c5e1c5e0fd1ff5d53e85aece04c1d436680b0a3fe978"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.273452 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-78rzv"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.275924 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mkr2q"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.276124 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mkr2q" event={"ID":"a49a70fd-643c-46d8-9f89-adf75cc92ca9","Type":"ContainerDied","Data":"5ce4f8234dded60ad6ca946ec354f2aa463a4b2e591679c57e38623ab37752b7"}
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.278120 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-h29m6" event={"ID":"0b5ec6d7-57b0-4082-9310-a18457ea9c36","Type":"ContainerDied","Data":"4872b33c8aaf0f41daa2aced35e06454a8afc2148437f34f1e104b48bd55c519"}
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.278204 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-h29m6"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.283729 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5ffq9"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.283725 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5ffq9" event={"ID":"26e6b24a-b7f6-4ec9-bf8e-798436d1e43c","Type":"ContainerDied","Data":"11676b0cc3a89aca6956c72aa6469d1464b9a2e349851080abe07748d7e6b04d"}
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.286181 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xj7n6" event={"ID":"3e78dffa-f782-4a5e-a76c-d090263ad82e","Type":"ContainerDied","Data":"a2c6e692ddd207a718332ad0e1e0729148bd48ede90277b4be1fafb2391210aa"}
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.286208 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xj7n6"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.290751 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-smb6m"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.307388 4910 scope.go:117] "RemoveContainer" containerID="a4100f962ee4fbc16c0d0bd98352c6df5e9c50bd1308eb5a6d6eeec453662063"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.312754 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mkr2q"]
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.324930 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mkr2q"]
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.325593 4910 scope.go:117] "RemoveContainer" containerID="1d40fb3fdc6ca0028f92694b5239f6b811737ee70d9d73109afc7b677f8ff8b5"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.349185 4910 scope.go:117] "RemoveContainer" containerID="84b5d2ad0554363f3b09f340e8de430c715d0de4de6b1f71b1c055c1cb18e4d4"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.356525 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-78rzv"]
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.361137 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-78rzv"]
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.370446 4910 scope.go:117] "RemoveContainer" containerID="575d0c9c0f7eddd6cbad4f6ff99ec24e7d62cc20dd7f2d360fe3f8917217bbc1"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.381403 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5ffq9"]
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.383909 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5ffq9"]
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.387127 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-h29m6"]
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.390090 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-h29m6"]
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.394393 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-xj7n6"]
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.397816 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-xj7n6"]
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.400481 4910 scope.go:117] "RemoveContainer" containerID="18b2f9393b35207928289eb9d3b2672b66a00642e9da3c476c05bdbcf27d2888"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.417579 4910 scope.go:117] "RemoveContainer" containerID="36b8a649a3e7ed02d84475c333f51423b8c8b2b7d60117b5cd8fcf741b9e6c4b"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.430479 4910 scope.go:117] "RemoveContainer" containerID="9e450963785ecfac63be7b08126cc1c87702e8f3a4cde44d3a024e4ade7f6fbe"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.444435 4910 scope.go:117] "RemoveContainer" containerID="3f171b7a81e199032665f7e2908fe69b1fb829a9c3b6e9715fec07b3ad2a36a5"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.457602 4910 scope.go:117] "RemoveContainer" containerID="cff3ef328153b2253505b5b04dc06523d33255d27e453ebad175d61f87084c4e"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.470314 4910 scope.go:117] "RemoveContainer" containerID="962dcdd9bcfab281e26eceebd0059c6208c0294e21815433f56d90275f4572a2"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.483226 4910 scope.go:117] "RemoveContainer" containerID="b3c344018e3a117c0ee651ab6cdc9dfe469146c0278b99115f9abde0cd2d060f"
Nov 25 21:34:38 crc kubenswrapper[4910]: I1125 21:34:38.497073 4910 scope.go:117] "RemoveContainer" containerID="50be094ef8501c1f6199c7d52f5f6ca9a7d14e7e4de65e60c7b0abb9ab595869"
Nov 25 21:34:39 crc kubenswrapper[4910]: I1125 21:34:39.219393 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" path="/var/lib/kubelet/pods/0b5ec6d7-57b0-4082-9310-a18457ea9c36/volumes"
Nov 25 21:34:39 crc kubenswrapper[4910]: I1125 21:34:39.221171 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" path="/var/lib/kubelet/pods/26e6b24a-b7f6-4ec9-bf8e-798436d1e43c/volumes"
Nov 25 21:34:39 crc kubenswrapper[4910]: I1125 21:34:39.222533 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e78dffa-f782-4a5e-a76c-d090263ad82e" path="/var/lib/kubelet/pods/3e78dffa-f782-4a5e-a76c-d090263ad82e/volumes"
Nov 25 21:34:39 crc kubenswrapper[4910]: I1125 21:34:39.224657 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" path="/var/lib/kubelet/pods/a49a70fd-643c-46d8-9f89-adf75cc92ca9/volumes"
Nov 25 21:34:39 crc kubenswrapper[4910]: I1125 21:34:39.226023 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb8c2b0c-00aa-406d-abb6-e989dbe3abea" path="/var/lib/kubelet/pods/fb8c2b0c-00aa-406d-abb6-e989dbe3abea/volumes"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472010 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-7h25w"]
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472472 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" containerName="extract-content"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472484 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" containerName="extract-content"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472495 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" containerName="extract-utilities"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472501 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" containerName="extract-utilities"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472510 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerName="extract-utilities"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472518 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerName="extract-utilities"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472537 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472545 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472558 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb8c2b0c-00aa-406d-abb6-e989dbe3abea" containerName="marketplace-operator"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472565 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb8c2b0c-00aa-406d-abb6-e989dbe3abea" containerName="marketplace-operator"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472577 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" containerName="extract-content"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472583 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" containerName="extract-content"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472589 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerName="extract-content"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472596 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerName="extract-content"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472602 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472608 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472617 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" containerName="extract-utilities"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472623 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" containerName="extract-utilities"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472630 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472636 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472645 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472651 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472661 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerName="extract-utilities"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472666 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerName="extract-utilities"
Nov 25 21:34:40 crc kubenswrapper[4910]: E1125 21:34:40.472673 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerName="extract-content"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472678 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerName="extract-content"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472772 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="26e6b24a-b7f6-4ec9-bf8e-798436d1e43c" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472781 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e78dffa-f782-4a5e-a76c-d090263ad82e" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472788 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb8c2b0c-00aa-406d-abb6-e989dbe3abea" containerName="marketplace-operator"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472796 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b5ec6d7-57b0-4082-9310-a18457ea9c36" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.472804 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a49a70fd-643c-46d8-9f89-adf75cc92ca9" containerName="registry-server"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.474435 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7h25w"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.477000 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.485493 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7h25w"]
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.621956 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-utilities\") pod \"redhat-operators-7h25w\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " pod="openshift-marketplace/redhat-operators-7h25w"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.622184 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwblf\" (UniqueName: \"kubernetes.io/projected/bd3392ac-5439-4adc-8e8c-1378d37225f3-kube-api-access-zwblf\") pod \"redhat-operators-7h25w\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " pod="openshift-marketplace/redhat-operators-7h25w"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.622288 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-catalog-content\") pod \"redhat-operators-7h25w\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " pod="openshift-marketplace/redhat-operators-7h25w"
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.675185 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-whlck"]
Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.676467 4910 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.678156 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.685972 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-whlck"] Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.723502 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwblf\" (UniqueName: \"kubernetes.io/projected/bd3392ac-5439-4adc-8e8c-1378d37225f3-kube-api-access-zwblf\") pod \"redhat-operators-7h25w\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.723555 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-catalog-content\") pod \"redhat-operators-7h25w\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.723588 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-utilities\") pod \"redhat-operators-7h25w\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.723971 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-utilities\") pod \"redhat-operators-7h25w\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.724082 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-catalog-content\") pod \"redhat-operators-7h25w\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.745311 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwblf\" (UniqueName: \"kubernetes.io/projected/bd3392ac-5439-4adc-8e8c-1378d37225f3-kube-api-access-zwblf\") pod \"redhat-operators-7h25w\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.824984 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a179743-09db-4281-b030-23d453ecc1d6-catalog-content\") pod \"community-operators-whlck\" (UID: \"2a179743-09db-4281-b030-23d453ecc1d6\") " pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.825033 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a179743-09db-4281-b030-23d453ecc1d6-utilities\") pod \"community-operators-whlck\" (UID: \"2a179743-09db-4281-b030-23d453ecc1d6\") " 
pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.825053 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ql29t\" (UniqueName: \"kubernetes.io/projected/2a179743-09db-4281-b030-23d453ecc1d6-kube-api-access-ql29t\") pod \"community-operators-whlck\" (UID: \"2a179743-09db-4281-b030-23d453ecc1d6\") " pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.831795 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.925876 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a179743-09db-4281-b030-23d453ecc1d6-catalog-content\") pod \"community-operators-whlck\" (UID: \"2a179743-09db-4281-b030-23d453ecc1d6\") " pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.926199 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a179743-09db-4281-b030-23d453ecc1d6-utilities\") pod \"community-operators-whlck\" (UID: \"2a179743-09db-4281-b030-23d453ecc1d6\") " pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.926222 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ql29t\" (UniqueName: \"kubernetes.io/projected/2a179743-09db-4281-b030-23d453ecc1d6-kube-api-access-ql29t\") pod \"community-operators-whlck\" (UID: \"2a179743-09db-4281-b030-23d453ecc1d6\") " pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.926706 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2a179743-09db-4281-b030-23d453ecc1d6-catalog-content\") pod \"community-operators-whlck\" (UID: \"2a179743-09db-4281-b030-23d453ecc1d6\") " pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.926994 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2a179743-09db-4281-b030-23d453ecc1d6-utilities\") pod \"community-operators-whlck\" (UID: \"2a179743-09db-4281-b030-23d453ecc1d6\") " pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.947358 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ql29t\" (UniqueName: \"kubernetes.io/projected/2a179743-09db-4281-b030-23d453ecc1d6-kube-api-access-ql29t\") pod \"community-operators-whlck\" (UID: \"2a179743-09db-4281-b030-23d453ecc1d6\") " pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:40 crc kubenswrapper[4910]: I1125 21:34:40.997992 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:41 crc kubenswrapper[4910]: I1125 21:34:41.024718 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-7h25w"] Nov 25 21:34:41 crc kubenswrapper[4910]: W1125 21:34:41.033000 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbd3392ac_5439_4adc_8e8c_1378d37225f3.slice/crio-f5f5d2d0380a37628f47381f10c43d66c25b003dc90531a257442a1a6252a781 WatchSource:0}: Error finding container f5f5d2d0380a37628f47381f10c43d66c25b003dc90531a257442a1a6252a781: Status 404 returned error can't find the container with id f5f5d2d0380a37628f47381f10c43d66c25b003dc90531a257442a1a6252a781 Nov 25 21:34:41 crc kubenswrapper[4910]: I1125 21:34:41.311895 4910 generic.go:334] "Generic (PLEG): container finished" podID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerID="f60ac7ca38b33af73afe8d247e08f3bc80f11eccadb1f358b0d6ae1884411e99" exitCode=0 Nov 25 21:34:41 crc kubenswrapper[4910]: I1125 21:34:41.312059 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7h25w" event={"ID":"bd3392ac-5439-4adc-8e8c-1378d37225f3","Type":"ContainerDied","Data":"f60ac7ca38b33af73afe8d247e08f3bc80f11eccadb1f358b0d6ae1884411e99"} Nov 25 21:34:41 crc kubenswrapper[4910]: I1125 21:34:41.312184 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7h25w" event={"ID":"bd3392ac-5439-4adc-8e8c-1378d37225f3","Type":"ContainerStarted","Data":"f5f5d2d0380a37628f47381f10c43d66c25b003dc90531a257442a1a6252a781"} Nov 25 21:34:41 crc kubenswrapper[4910]: I1125 21:34:41.408311 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-whlck"] Nov 25 21:34:41 crc kubenswrapper[4910]: W1125 21:34:41.413234 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2a179743_09db_4281_b030_23d453ecc1d6.slice/crio-a105d363921d75cc07534d901bfeec746a6c834d03ea119e117f88e8d8d64a3d WatchSource:0}: Error finding container a105d363921d75cc07534d901bfeec746a6c834d03ea119e117f88e8d8d64a3d: Status 404 returned error can't find the container with id a105d363921d75cc07534d901bfeec746a6c834d03ea119e117f88e8d8d64a3d Nov 25 21:34:42 crc kubenswrapper[4910]: I1125 21:34:42.319238 4910 generic.go:334] "Generic (PLEG): container finished" podID="2a179743-09db-4281-b030-23d453ecc1d6" containerID="0f75883d00f916457955e236e9a10cf7b5768574658f375e8973d19fbef2e07e" exitCode=0 Nov 25 21:34:42 crc kubenswrapper[4910]: I1125 21:34:42.319364 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-whlck" event={"ID":"2a179743-09db-4281-b030-23d453ecc1d6","Type":"ContainerDied","Data":"0f75883d00f916457955e236e9a10cf7b5768574658f375e8973d19fbef2e07e"} Nov 25 21:34:42 crc kubenswrapper[4910]: I1125 21:34:42.319550 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-whlck" event={"ID":"2a179743-09db-4281-b030-23d453ecc1d6","Type":"ContainerStarted","Data":"a105d363921d75cc07534d901bfeec746a6c834d03ea119e117f88e8d8d64a3d"} Nov 25 21:34:42 crc kubenswrapper[4910]: I1125 21:34:42.873420 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-776qn"] Nov 25 21:34:42 crc kubenswrapper[4910]: I1125 21:34:42.874568 4910 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:42 crc kubenswrapper[4910]: I1125 21:34:42.877372 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 21:34:42 crc kubenswrapper[4910]: I1125 21:34:42.889545 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-776qn"] Nov 25 21:34:42 crc kubenswrapper[4910]: I1125 21:34:42.956039 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d6e52b7-c568-4b50-9af0-70a8ce753479-utilities\") pod \"certified-operators-776qn\" (UID: \"7d6e52b7-c568-4b50-9af0-70a8ce753479\") " pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:42 crc kubenswrapper[4910]: I1125 21:34:42.956082 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d6e52b7-c568-4b50-9af0-70a8ce753479-catalog-content\") pod \"certified-operators-776qn\" (UID: \"7d6e52b7-c568-4b50-9af0-70a8ce753479\") " pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:42 crc kubenswrapper[4910]: I1125 21:34:42.956135 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klkn6\" (UniqueName: \"kubernetes.io/projected/7d6e52b7-c568-4b50-9af0-70a8ce753479-kube-api-access-klkn6\") pod \"certified-operators-776qn\" (UID: \"7d6e52b7-c568-4b50-9af0-70a8ce753479\") " pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.057427 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klkn6\" (UniqueName: \"kubernetes.io/projected/7d6e52b7-c568-4b50-9af0-70a8ce753479-kube-api-access-klkn6\") pod \"certified-operators-776qn\" (UID: \"7d6e52b7-c568-4b50-9af0-70a8ce753479\") " pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.057510 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d6e52b7-c568-4b50-9af0-70a8ce753479-utilities\") pod \"certified-operators-776qn\" (UID: \"7d6e52b7-c568-4b50-9af0-70a8ce753479\") " pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.057533 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d6e52b7-c568-4b50-9af0-70a8ce753479-catalog-content\") pod \"certified-operators-776qn\" (UID: \"7d6e52b7-c568-4b50-9af0-70a8ce753479\") " pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.058139 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7d6e52b7-c568-4b50-9af0-70a8ce753479-utilities\") pod \"certified-operators-776qn\" (UID: \"7d6e52b7-c568-4b50-9af0-70a8ce753479\") " pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.058160 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7d6e52b7-c568-4b50-9af0-70a8ce753479-catalog-content\") pod 
\"certified-operators-776qn\" (UID: \"7d6e52b7-c568-4b50-9af0-70a8ce753479\") " pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.070455 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-drk5r"] Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.071660 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.073341 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.078960 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klkn6\" (UniqueName: \"kubernetes.io/projected/7d6e52b7-c568-4b50-9af0-70a8ce753479-kube-api-access-klkn6\") pod \"certified-operators-776qn\" (UID: \"7d6e52b7-c568-4b50-9af0-70a8ce753479\") " pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.085115 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-drk5r"] Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.158703 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ada3f847-455b-464f-9d23-7052e0d91f2b-catalog-content\") pod \"redhat-marketplace-drk5r\" (UID: \"ada3f847-455b-464f-9d23-7052e0d91f2b\") " pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.158792 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ada3f847-455b-464f-9d23-7052e0d91f2b-utilities\") pod \"redhat-marketplace-drk5r\" (UID: \"ada3f847-455b-464f-9d23-7052e0d91f2b\") " pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.158892 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9wnj\" (UniqueName: \"kubernetes.io/projected/ada3f847-455b-464f-9d23-7052e0d91f2b-kube-api-access-h9wnj\") pod \"redhat-marketplace-drk5r\" (UID: \"ada3f847-455b-464f-9d23-7052e0d91f2b\") " pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.199954 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.259657 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ada3f847-455b-464f-9d23-7052e0d91f2b-utilities\") pod \"redhat-marketplace-drk5r\" (UID: \"ada3f847-455b-464f-9d23-7052e0d91f2b\") " pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.259730 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9wnj\" (UniqueName: \"kubernetes.io/projected/ada3f847-455b-464f-9d23-7052e0d91f2b-kube-api-access-h9wnj\") pod \"redhat-marketplace-drk5r\" (UID: \"ada3f847-455b-464f-9d23-7052e0d91f2b\") " pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.260175 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ada3f847-455b-464f-9d23-7052e0d91f2b-utilities\") pod \"redhat-marketplace-drk5r\" (UID: \"ada3f847-455b-464f-9d23-7052e0d91f2b\") " pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.260510 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ada3f847-455b-464f-9d23-7052e0d91f2b-catalog-content\") pod \"redhat-marketplace-drk5r\" (UID: \"ada3f847-455b-464f-9d23-7052e0d91f2b\") " pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.260627 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ada3f847-455b-464f-9d23-7052e0d91f2b-catalog-content\") pod \"redhat-marketplace-drk5r\" (UID: \"ada3f847-455b-464f-9d23-7052e0d91f2b\") " pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.276404 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9wnj\" (UniqueName: \"kubernetes.io/projected/ada3f847-455b-464f-9d23-7052e0d91f2b-kube-api-access-h9wnj\") pod \"redhat-marketplace-drk5r\" (UID: \"ada3f847-455b-464f-9d23-7052e0d91f2b\") " pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.409903 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.614582 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-776qn"] Nov 25 21:34:43 crc kubenswrapper[4910]: W1125 21:34:43.621368 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d6e52b7_c568_4b50_9af0_70a8ce753479.slice/crio-3d3c0967b4863c7fa6865c13820dfc25d120db40ef9342629c0ccff4f917f72e WatchSource:0}: Error finding container 3d3c0967b4863c7fa6865c13820dfc25d120db40ef9342629c0ccff4f917f72e: Status 404 returned error can't find the container with id 3d3c0967b4863c7fa6865c13820dfc25d120db40ef9342629c0ccff4f917f72e Nov 25 21:34:43 crc kubenswrapper[4910]: I1125 21:34:43.815700 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-drk5r"] Nov 25 21:34:43 crc kubenswrapper[4910]: W1125 21:34:43.823919 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podada3f847_455b_464f_9d23_7052e0d91f2b.slice/crio-4cb45c9655d2583d5d572d617f2af2430de5a0af7c0afe9352a5d58b78446fec WatchSource:0}: Error finding container 4cb45c9655d2583d5d572d617f2af2430de5a0af7c0afe9352a5d58b78446fec: Status 404 returned error can't find the container with id 4cb45c9655d2583d5d572d617f2af2430de5a0af7c0afe9352a5d58b78446fec Nov 25 21:34:44 crc kubenswrapper[4910]: I1125 21:34:44.330638 4910 generic.go:334] "Generic (PLEG): container finished" podID="ada3f847-455b-464f-9d23-7052e0d91f2b" containerID="7c8df92bb8b36eb0ac2dabce3da2ff06f69431263428e1d1c7a16fffc9d14020" exitCode=0 Nov 25 21:34:44 crc kubenswrapper[4910]: I1125 21:34:44.330956 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-drk5r" event={"ID":"ada3f847-455b-464f-9d23-7052e0d91f2b","Type":"ContainerDied","Data":"7c8df92bb8b36eb0ac2dabce3da2ff06f69431263428e1d1c7a16fffc9d14020"} Nov 25 21:34:44 crc kubenswrapper[4910]: I1125 21:34:44.330984 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-drk5r" event={"ID":"ada3f847-455b-464f-9d23-7052e0d91f2b","Type":"ContainerStarted","Data":"4cb45c9655d2583d5d572d617f2af2430de5a0af7c0afe9352a5d58b78446fec"} Nov 25 21:34:44 crc kubenswrapper[4910]: I1125 21:34:44.333119 4910 generic.go:334] "Generic (PLEG): container finished" podID="7d6e52b7-c568-4b50-9af0-70a8ce753479" containerID="b65c2618435fef26f1888d73dc2ee646671eadc82549e45e22dcf6eb25a355a6" exitCode=0 Nov 25 21:34:44 crc kubenswrapper[4910]: I1125 21:34:44.333192 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-776qn" event={"ID":"7d6e52b7-c568-4b50-9af0-70a8ce753479","Type":"ContainerDied","Data":"b65c2618435fef26f1888d73dc2ee646671eadc82549e45e22dcf6eb25a355a6"} Nov 25 21:34:44 crc kubenswrapper[4910]: I1125 21:34:44.333211 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-776qn" event={"ID":"7d6e52b7-c568-4b50-9af0-70a8ce753479","Type":"ContainerStarted","Data":"3d3c0967b4863c7fa6865c13820dfc25d120db40ef9342629c0ccff4f917f72e"} Nov 25 21:34:44 crc kubenswrapper[4910]: I1125 21:34:44.341024 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7h25w" 
event={"ID":"bd3392ac-5439-4adc-8e8c-1378d37225f3","Type":"ContainerStarted","Data":"0c726b694051c5e9eefb1b68d8015468187213d31e92683bcf6d9f431a1c3c9b"} Nov 25 21:34:44 crc kubenswrapper[4910]: I1125 21:34:44.344213 4910 generic.go:334] "Generic (PLEG): container finished" podID="2a179743-09db-4281-b030-23d453ecc1d6" containerID="295d88c8b7a965a02be5bdb0adc593d4b46864a22f3b4e0773445db2c6c1407e" exitCode=0 Nov 25 21:34:44 crc kubenswrapper[4910]: I1125 21:34:44.344296 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-whlck" event={"ID":"2a179743-09db-4281-b030-23d453ecc1d6","Type":"ContainerDied","Data":"295d88c8b7a965a02be5bdb0adc593d4b46864a22f3b4e0773445db2c6c1407e"} Nov 25 21:34:45 crc kubenswrapper[4910]: I1125 21:34:45.357304 4910 generic.go:334] "Generic (PLEG): container finished" podID="ada3f847-455b-464f-9d23-7052e0d91f2b" containerID="1081d8a488d0a390d9cfa752a4f3af6be9bb072b92d6b2341d1f91a555925409" exitCode=0 Nov 25 21:34:45 crc kubenswrapper[4910]: I1125 21:34:45.357393 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-drk5r" event={"ID":"ada3f847-455b-464f-9d23-7052e0d91f2b","Type":"ContainerDied","Data":"1081d8a488d0a390d9cfa752a4f3af6be9bb072b92d6b2341d1f91a555925409"} Nov 25 21:34:45 crc kubenswrapper[4910]: I1125 21:34:45.361658 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-whlck" event={"ID":"2a179743-09db-4281-b030-23d453ecc1d6","Type":"ContainerStarted","Data":"143c4b1ff14e42cd05795dee2ea9828aebc58a77d9e4cf10620f2666e1a00b79"} Nov 25 21:34:45 crc kubenswrapper[4910]: I1125 21:34:45.365055 4910 generic.go:334] "Generic (PLEG): container finished" podID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerID="0c726b694051c5e9eefb1b68d8015468187213d31e92683bcf6d9f431a1c3c9b" exitCode=0 Nov 25 21:34:45 crc kubenswrapper[4910]: I1125 21:34:45.365104 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7h25w" event={"ID":"bd3392ac-5439-4adc-8e8c-1378d37225f3","Type":"ContainerDied","Data":"0c726b694051c5e9eefb1b68d8015468187213d31e92683bcf6d9f431a1c3c9b"} Nov 25 21:34:45 crc kubenswrapper[4910]: I1125 21:34:45.423893 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-whlck" podStartSLOduration=2.974117153 podStartE2EDuration="5.423874524s" podCreationTimestamp="2025-11-25 21:34:40 +0000 UTC" firstStartedPulling="2025-11-25 21:34:42.320893279 +0000 UTC m=+237.783369601" lastFinishedPulling="2025-11-25 21:34:44.77065065 +0000 UTC m=+240.233126972" observedRunningTime="2025-11-25 21:34:45.423641218 +0000 UTC m=+240.886117540" watchObservedRunningTime="2025-11-25 21:34:45.423874524 +0000 UTC m=+240.886350846" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.169807 4910 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.171017 4910 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.171193 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.171368 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879" gracePeriod=15 Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.171395 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a" gracePeriod=15 Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.171489 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e" gracePeriod=15 Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.171537 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83" gracePeriod=15 Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.171561 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609" gracePeriod=15 Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172048 4910 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 21:34:47 crc kubenswrapper[4910]: E1125 21:34:47.172199 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172213 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 21:34:47 crc kubenswrapper[4910]: E1125 21:34:47.172227 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172235 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 21:34:47 crc kubenswrapper[4910]: E1125 21:34:47.172263 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172273 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 25 21:34:47 crc kubenswrapper[4910]: E1125 21:34:47.172284 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 
25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172294 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 21:34:47 crc kubenswrapper[4910]: E1125 21:34:47.172306 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172314 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 21:34:47 crc kubenswrapper[4910]: E1125 21:34:47.172325 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172334 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 21:34:47 crc kubenswrapper[4910]: E1125 21:34:47.172344 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172352 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 21:34:47 crc kubenswrapper[4910]: E1125 21:34:47.172362 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172370 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172488 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172501 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172512 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172523 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172532 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172555 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.172564 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.211232 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] 
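
The entries above capture a static-pod rollover: a new manifest for kube-apiserver-startup-monitor-crc arrives from the "file" source, the old kube-apiserver-crc definition is removed, each of its containers (kube-apiserver, kube-apiserver-check-endpoints, kube-apiserver-cert-regeneration-controller, kube-apiserver-insecure-readyz, kube-apiserver-cert-syncer) is stopped via "Killing container with a grace period" with gracePeriod=15, and the CPU and memory managers then drop their stale per-container state. As a minimal sketch for working with a log like this one (a hypothetical helper, not kubelet code; the file name killgrep.go and the regex are assumptions derived from the entry format visible above), the kill records can be extracted as follows:

// killgrep.go - hypothetical helper, not part of the kubelet: scans a
// kubelet log on stdin for "Killing container with a grace period"
// entries and prints pod, container name, container ID and grace period.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Pattern is an assumption derived from entries like the ones above:
//   ... "Killing container with a grace period" pod="ns/name"
//   podUID="..." containerName="kube-apiserver"
//   containerID="cri-o://..." gracePeriod=15
var killRe = regexp.MustCompile(
	`"Killing container with a grace period" pod="([^"]+)".*?containerName="([^"]+)" containerID="([^"]+)" gracePeriod=(\d+)`)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet log lines can be very long
	for sc.Scan() {
		if m := killRe.FindStringSubmatch(sc.Text()); m != nil {
			fmt.Printf("pod=%s container=%s id=%s grace=%ss\n",
				m[1], m[2], m[3], m[4])
		}
	}
	if err := sc.Err(); err != nil {
		fmt.Fprintln(os.Stderr, "scan:", err)
		os.Exit(1)
	}
}

Run as `go run killgrep.go < kubelet.log`; for the rollover above it would print one line per kube-apiserver-crc container, each with grace=15s.
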
Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.320002 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.320063 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.320101 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.320127 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.320166 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.320193 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.320222 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.320262 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.385835 4910 generic.go:334] "Generic (PLEG): container finished" podID="7d6e52b7-c568-4b50-9af0-70a8ce753479" containerID="7c6891c54817fc89809719df1db8cae5800dcbbcd785da76de4e79dff3ba004d" exitCode=0 Nov 25 21:34:47 crc kubenswrapper[4910]: 
I1125 21:34:47.385936 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-776qn" event={"ID":"7d6e52b7-c568-4b50-9af0-70a8ce753479","Type":"ContainerDied","Data":"7c6891c54817fc89809719df1db8cae5800dcbbcd785da76de4e79dff3ba004d"} Nov 25 21:34:47 crc kubenswrapper[4910]: E1125 21:34:47.390052 4910 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.142:6443: connect: connection refused" event="&Event{ObjectMeta:{certified-operators-776qn.187b5d80eecf2cd0 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:certified-operators-776qn,UID:7d6e52b7-c568-4b50-9af0-70a8ce753479,APIVersion:v1,ResourceVersion:29482,FieldPath:spec.containers{registry-server},},Reason:Pulling,Message:Pulling image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 21:34:47.38943304 +0000 UTC m=+242.851909362,LastTimestamp:2025-11-25 21:34:47.38943304 +0000 UTC m=+242.851909362,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.392204 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7h25w" event={"ID":"bd3392ac-5439-4adc-8e8c-1378d37225f3","Type":"ContainerStarted","Data":"66338e27ae38d6cedb17471f216d57fd2c4a666cbb08f01df6e8b7fa007043b2"} Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.394128 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.394386 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.394586 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.394745 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.395020 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" 
pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.395515 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-drk5r" event={"ID":"ada3f847-455b-464f-9d23-7052e0d91f2b","Type":"ContainerStarted","Data":"6f80fecb5fc8b8c84eda15126196a705608b5398b9519e40376243af19463cd6"} Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.396441 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.396589 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.396723 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.397496 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.398351 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.402305 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.403344 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a" exitCode=0 Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.403369 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83" exitCode=0 Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.403377 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e" exitCode=0 Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.403383 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" 
containerID="a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609" exitCode=2 Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.403422 4910 scope.go:117] "RemoveContainer" containerID="2536fcc7e92a1abf830f1df6f84068081fe6e72bf758f3ab6112aea4ebe5065c" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421409 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421459 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421493 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421518 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421531 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421554 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421582 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421605 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421670 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: 
\"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421702 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421724 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421746 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421768 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421793 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421816 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.421839 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:47 crc kubenswrapper[4910]: I1125 21:34:47.505661 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.409273 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"e8a38f416fa5b869b705845412ac538afc1107b69060a972df5b6e5dde0a10ee"} Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.409764 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"ce79fd3dbeeb93414df4cd2cc2c493f8fc82b7206ff1e1abe9c05299f1088369"} Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.410527 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.410965 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.411790 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.412431 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.413085 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-776qn" event={"ID":"7d6e52b7-c568-4b50-9af0-70a8ce753479","Type":"ContainerStarted","Data":"09e86b579c704c05f1a4c8da29fedadf9ddbfe9d1ff778a8851551b0005700a8"} Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.413613 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.413923 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 
21:34:48.414205 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.414525 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.415280 4910 generic.go:334] "Generic (PLEG): container finished" podID="1790a245-602a-4fe6-b17d-61c4af8359d1" containerID="d2b96ea45a269edd0de939eae3b773be763c0347a84181515ae01dad1723bf8a" exitCode=0 Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.415338 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"1790a245-602a-4fe6-b17d-61c4af8359d1","Type":"ContainerDied","Data":"d2b96ea45a269edd0de939eae3b773be763c0347a84181515ae01dad1723bf8a"} Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.415638 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.415865 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.416137 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.416403 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.416871 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:48 crc kubenswrapper[4910]: I1125 21:34:48.417553 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 
21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.537756 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.539003 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.539666 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.540055 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.540481 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.540740 4910 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.540978 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.541276 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.652929 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.653013 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.653076 4910 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.653491 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.653510 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.653558 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.684323 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.685101 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.685600 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.686002 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.686376 4910 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.686711 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.687153 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.754760 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1790a245-602a-4fe6-b17d-61c4af8359d1-kube-api-access\") pod \"1790a245-602a-4fe6-b17d-61c4af8359d1\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.754878 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-kubelet-dir\") pod \"1790a245-602a-4fe6-b17d-61c4af8359d1\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.754932 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-var-lock\") pod \"1790a245-602a-4fe6-b17d-61c4af8359d1\" (UID: \"1790a245-602a-4fe6-b17d-61c4af8359d1\") " Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.755219 4910 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.755438 4910 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.755449 4910 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.755498 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "1790a245-602a-4fe6-b17d-61c4af8359d1" (UID: "1790a245-602a-4fe6-b17d-61c4af8359d1"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.755545 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-var-lock" (OuterVolumeSpecName: "var-lock") pod "1790a245-602a-4fe6-b17d-61c4af8359d1" (UID: "1790a245-602a-4fe6-b17d-61c4af8359d1"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.760376 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1790a245-602a-4fe6-b17d-61c4af8359d1-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1790a245-602a-4fe6-b17d-61c4af8359d1" (UID: "1790a245-602a-4fe6-b17d-61c4af8359d1"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.856734 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1790a245-602a-4fe6-b17d-61c4af8359d1-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.856784 4910 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:49 crc kubenswrapper[4910]: I1125 21:34:49.856796 4910 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/1790a245-602a-4fe6-b17d-61c4af8359d1-var-lock\") on node \"crc\" DevicePath \"\"" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.434836 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"1790a245-602a-4fe6-b17d-61c4af8359d1","Type":"ContainerDied","Data":"3d87d7a05d8725e11047c82ed7f10e9b3f6bb8325a3eca9fa93a2279ea3aa6c5"} Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.434891 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3d87d7a05d8725e11047c82ed7f10e9b3f6bb8325a3eca9fa93a2279ea3aa6c5" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.435001 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.444987 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.445911 4910 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879" exitCode=0 Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.445980 4910 scope.go:117] "RemoveContainer" containerID="77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.446169 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.450595 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.451214 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.451694 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.452131 4910 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.452811 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.453089 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.470672 4910 scope.go:117] "RemoveContainer" containerID="87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.470588 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.471498 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.471728 4910 status_manager.go:851] "Failed to get status for 
pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.472126 4910 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.472399 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.475277 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.488997 4910 scope.go:117] "RemoveContainer" containerID="a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.508007 4910 scope.go:117] "RemoveContainer" containerID="a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.525458 4910 scope.go:117] "RemoveContainer" containerID="7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.541989 4910 scope.go:117] "RemoveContainer" containerID="40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.562585 4910 scope.go:117] "RemoveContainer" containerID="77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a" Nov 25 21:34:50 crc kubenswrapper[4910]: E1125 21:34:50.563116 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\": container with ID starting with 77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a not found: ID does not exist" containerID="77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.563159 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a"} err="failed to get container status \"77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\": rpc error: code = NotFound desc = could not find container \"77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a\": container with ID starting with 77b16559e4637c0c5f8b1dd1d15f00cd75969b47009947bb4a4bcea1397ba75a not found: ID does not exist" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.563190 4910 scope.go:117] "RemoveContainer" 
containerID="87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83" Nov 25 21:34:50 crc kubenswrapper[4910]: E1125 21:34:50.565334 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\": container with ID starting with 87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83 not found: ID does not exist" containerID="87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.565379 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83"} err="failed to get container status \"87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\": rpc error: code = NotFound desc = could not find container \"87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83\": container with ID starting with 87c6917ae48a86158f77b145b1c9452dc095942d987d2c9a4d824d4be5799d83 not found: ID does not exist" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.565406 4910 scope.go:117] "RemoveContainer" containerID="a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e" Nov 25 21:34:50 crc kubenswrapper[4910]: E1125 21:34:50.565887 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\": container with ID starting with a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e not found: ID does not exist" containerID="a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.565919 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e"} err="failed to get container status \"a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\": rpc error: code = NotFound desc = could not find container \"a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e\": container with ID starting with a77760d11232d6c711c666fc84644d935f9c9553633f8aa506e58a3a1616375e not found: ID does not exist" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.565946 4910 scope.go:117] "RemoveContainer" containerID="a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609" Nov 25 21:34:50 crc kubenswrapper[4910]: E1125 21:34:50.566287 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\": container with ID starting with a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609 not found: ID does not exist" containerID="a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.566316 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609"} err="failed to get container status \"a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\": rpc error: code = NotFound desc = could not find container \"a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609\": container with ID starting with 
a5c305e54339a6863d6d49d21e5ceca691bf887a3ebe3b538afcc8ef9f52e609 not found: ID does not exist" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.566332 4910 scope.go:117] "RemoveContainer" containerID="7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879" Nov 25 21:34:50 crc kubenswrapper[4910]: E1125 21:34:50.566538 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\": container with ID starting with 7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879 not found: ID does not exist" containerID="7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.566563 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879"} err="failed to get container status \"7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\": rpc error: code = NotFound desc = could not find container \"7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879\": container with ID starting with 7ee35942a53eee92da653771d7d48684e107e00a1cf4278b7c575fa5f9dc5879 not found: ID does not exist" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.566578 4910 scope.go:117] "RemoveContainer" containerID="40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c" Nov 25 21:34:50 crc kubenswrapper[4910]: E1125 21:34:50.567059 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\": container with ID starting with 40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c not found: ID does not exist" containerID="40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.567108 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c"} err="failed to get container status \"40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\": rpc error: code = NotFound desc = could not find container \"40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c\": container with ID starting with 40df1c6f0f020263c5d2880df15caa7d842e95db171baa78f5ca947ad93dc07c not found: ID does not exist" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.832406 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.832749 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.998821 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:50 crc kubenswrapper[4910]: I1125 21:34:50.998903 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.211064 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" 
path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 25 21:34:51 crc kubenswrapper[4910]: E1125 21:34:51.212741 4910 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.142:6443: connect: connection refused" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" volumeName="registry-storage" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.396977 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.397526 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.397734 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.397890 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.398038 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.398312 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.398673 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.489913 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-whlck" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.490604 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" 
pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.491065 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.491337 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.491610 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.491858 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.492110 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:51 crc kubenswrapper[4910]: I1125 21:34:51.874962 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-7h25w" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerName="registry-server" probeResult="failure" output=< Nov 25 21:34:51 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Nov 25 21:34:51 crc kubenswrapper[4910]: > Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.200922 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.201294 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.239921 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.240537 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 
38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.240962 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.241282 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.241571 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.241857 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.242070 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.410681 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.410736 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.449779 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.450322 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.450614 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.450860 4910 status_manager.go:851] "Failed to get status for pod" 
podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.451060 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.451325 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.451516 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.501917 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-drk5r" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.502438 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.502727 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.503075 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.503525 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.503835 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.504170 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.507107 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-776qn" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.507504 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.507942 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.508281 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.508543 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.508851 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:53 crc kubenswrapper[4910]: I1125 21:34:53.509101 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:55 crc kubenswrapper[4910]: I1125 21:34:55.209528 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:55 
crc kubenswrapper[4910]: I1125 21:34:55.210068 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:55 crc kubenswrapper[4910]: I1125 21:34:55.210517 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:55 crc kubenswrapper[4910]: I1125 21:34:55.210862 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:55 crc kubenswrapper[4910]: I1125 21:34:55.211431 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:55 crc kubenswrapper[4910]: I1125 21:34:55.212142 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:56 crc kubenswrapper[4910]: E1125 21:34:56.180527 4910 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.142:6443: connect: connection refused" event="&Event{ObjectMeta:{certified-operators-776qn.187b5d80eecf2cd0 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:certified-operators-776qn,UID:7d6e52b7-c568-4b50-9af0-70a8ce753479,APIVersion:v1,ResourceVersion:29482,FieldPath:spec.containers{registry-server},},Reason:Pulling,Message:Pulling image \"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-25 21:34:47.38943304 +0000 UTC m=+242.851909362,LastTimestamp:2025-11-25 21:34:47.38943304 +0000 UTC m=+242.851909362,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 25 21:34:56 crc kubenswrapper[4910]: E1125 21:34:56.883412 4910 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:56 crc kubenswrapper[4910]: E1125 21:34:56.884500 4910 controller.go:195] "Failed to update lease" err="Put 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:56 crc kubenswrapper[4910]: E1125 21:34:56.884976 4910 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:56 crc kubenswrapper[4910]: E1125 21:34:56.885446 4910 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:56 crc kubenswrapper[4910]: E1125 21:34:56.885925 4910 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:56 crc kubenswrapper[4910]: I1125 21:34:56.885979 4910 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 25 21:34:56 crc kubenswrapper[4910]: E1125 21:34:56.886514 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" interval="200ms" Nov 25 21:34:57 crc kubenswrapper[4910]: E1125 21:34:57.086916 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" interval="400ms" Nov 25 21:34:57 crc kubenswrapper[4910]: E1125 21:34:57.487347 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" interval="800ms" Nov 25 21:34:58 crc kubenswrapper[4910]: E1125 21:34:58.288189 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" interval="1.6s" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.203399 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.204161 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.205548 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.210076 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.210325 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.210483 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.210642 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.220505 4910 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a7faead0-20c9-4f7d-a632-16cda08af34b" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.220525 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a7faead0-20c9-4f7d-a632-16cda08af34b" Nov 25 21:34:59 crc kubenswrapper[4910]: E1125 21:34:59.220727 4910 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.221115 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:34:59 crc kubenswrapper[4910]: W1125 21:34:59.243838 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-d84b0549ce648e8642f419f47c706dd3ec7402f4141dbb192053e628afa12a2a WatchSource:0}: Error finding container d84b0549ce648e8642f419f47c706dd3ec7402f4141dbb192053e628afa12a2a: Status 404 returned error can't find the container with id d84b0549ce648e8642f419f47c706dd3ec7402f4141dbb192053e628afa12a2a Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.500204 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.500296 4910 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76" exitCode=1 Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.500371 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76"} Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.501491 4910 scope.go:117] "RemoveContainer" containerID="1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.501544 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"d84b0549ce648e8642f419f47c706dd3ec7402f4141dbb192053e628afa12a2a"} Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.507993 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.508477 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.509178 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.509569 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc 
kubenswrapper[4910]: I1125 21:34:59.510000 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.510371 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: I1125 21:34:59.510890 4910 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:34:59 crc kubenswrapper[4910]: E1125 21:34:59.889002 4910 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.142:6443: connect: connection refused" interval="3.2s" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.513008 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.513405 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"415448337e9e6f5aeb7f85028d3f0c105eee5d9e37354565423e9e008bdb89c6"} Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.515790 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.516871 4910 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="9c52b6671accad88e792cea627f48156f05c8a6edb0d19a83377d7bcf34b6344" exitCode=0 Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.516928 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"9c52b6671accad88e792cea627f48156f05c8a6edb0d19a83377d7bcf34b6344"} Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.517286 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.517657 4910 
kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a7faead0-20c9-4f7d-a632-16cda08af34b" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.517735 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a7faead0-20c9-4f7d-a632-16cda08af34b" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.517882 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: E1125 21:35:00.518456 4910 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.518734 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.519881 4910 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.520571 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.521168 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.521892 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.522377 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: 
I1125 21:35:00.522913 4910 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.523378 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.523857 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.524162 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.524656 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.880094 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.881479 4910 status_manager.go:851] "Failed to get status for pod" podUID="2a179743-09db-4281-b030-23d453ecc1d6" pod="openshift-marketplace/community-operators-whlck" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-whlck\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.881747 4910 status_manager.go:851] "Failed to get status for pod" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" pod="openshift-marketplace/redhat-operators-7h25w" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-7h25w\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.882000 4910 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.882273 4910 status_manager.go:851] "Failed to get status for pod" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" 
pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.882477 4910 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.882670 4910 status_manager.go:851] "Failed to get status for pod" podUID="7d6e52b7-c568-4b50-9af0-70a8ce753479" pod="openshift-marketplace/certified-operators-776qn" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-776qn\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.882888 4910 status_manager.go:851] "Failed to get status for pod" podUID="ada3f847-455b-464f-9d23-7052e0d91f2b" pod="openshift-marketplace/redhat-marketplace-drk5r" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-drk5r\": dial tcp 38.102.83.142:6443: connect: connection refused" Nov 25 21:35:00 crc kubenswrapper[4910]: I1125 21:35:00.981000 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 21:35:01 crc kubenswrapper[4910]: I1125 21:35:01.531520 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"b748b7ba0abee15e9e10b00593af3c770d0782de821f9e92b62bee0919e1f391"} Nov 25 21:35:01 crc kubenswrapper[4910]: I1125 21:35:01.531628 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e39efd066bd41e255e0e84405579ac832f2f871b5ebaef6b5ff6184ce327d018"} Nov 25 21:35:01 crc kubenswrapper[4910]: I1125 21:35:01.531651 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"e05a102221f8b898aa6c8d9399fa7409365ca8cfa0c3429f33d260ddaa126491"} Nov 25 21:35:01 crc kubenswrapper[4910]: I1125 21:35:01.531671 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"4230cf3d96a13bb69cae5b72b765149098386a9d5e09c60d06684c8e5d0ae426"} Nov 25 21:35:02 crc kubenswrapper[4910]: I1125 21:35:02.542063 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f60f45dad211e9e54b4fe4c002ab9db45ff6f14226d97b8d2c9bc442ab2f4fa2"} Nov 25 21:35:02 crc kubenswrapper[4910]: I1125 21:35:02.542270 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:35:02 crc kubenswrapper[4910]: I1125 21:35:02.542365 4910 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podUID="a7faead0-20c9-4f7d-a632-16cda08af34b" Nov 25 21:35:02 crc kubenswrapper[4910]: I1125 21:35:02.542391 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a7faead0-20c9-4f7d-a632-16cda08af34b" Nov 25 21:35:03 crc kubenswrapper[4910]: I1125 21:35:03.630407 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:35:03 crc kubenswrapper[4910]: I1125 21:35:03.630630 4910 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 25 21:35:03 crc kubenswrapper[4910]: I1125 21:35:03.630810 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 25 21:35:04 crc kubenswrapper[4910]: I1125 21:35:04.221490 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:35:04 crc kubenswrapper[4910]: I1125 21:35:04.221534 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:35:04 crc kubenswrapper[4910]: I1125 21:35:04.226717 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:35:07 crc kubenswrapper[4910]: I1125 21:35:07.551799 4910 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:35:07 crc kubenswrapper[4910]: I1125 21:35:07.609503 4910 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="3d658cd3-89b7-4fa2-bc21-be4d1642e0c4" Nov 25 21:35:07 crc kubenswrapper[4910]: I1125 21:35:07.968040 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:35:08 crc kubenswrapper[4910]: I1125 21:35:08.574408 4910 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a7faead0-20c9-4f7d-a632-16cda08af34b" Nov 25 21:35:08 crc kubenswrapper[4910]: I1125 21:35:08.574438 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a7faead0-20c9-4f7d-a632-16cda08af34b" Nov 25 21:35:08 crc kubenswrapper[4910]: I1125 21:35:08.579276 4910 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="3d658cd3-89b7-4fa2-bc21-be4d1642e0c4" Nov 25 21:35:08 crc kubenswrapper[4910]: I1125 21:35:08.579902 4910 status_manager.go:308] "Container readiness changed before pod has synced" pod="openshift-kube-apiserver/kube-apiserver-crc" containerID="cri-o://4230cf3d96a13bb69cae5b72b765149098386a9d5e09c60d06684c8e5d0ae426" Nov 25 21:35:08 crc kubenswrapper[4910]: I1125 21:35:08.579931 4910 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:35:09 crc kubenswrapper[4910]: I1125 21:35:09.580630 4910 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a7faead0-20c9-4f7d-a632-16cda08af34b" Nov 25 21:35:09 crc kubenswrapper[4910]: I1125 21:35:09.580926 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="a7faead0-20c9-4f7d-a632-16cda08af34b" Nov 25 21:35:09 crc kubenswrapper[4910]: I1125 21:35:09.584087 4910 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="3d658cd3-89b7-4fa2-bc21-be4d1642e0c4" Nov 25 21:35:13 crc kubenswrapper[4910]: I1125 21:35:13.630787 4910 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 25 21:35:13 crc kubenswrapper[4910]: I1125 21:35:13.631027 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 25 21:35:18 crc kubenswrapper[4910]: I1125 21:35:18.063918 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 25 21:35:18 crc kubenswrapper[4910]: I1125 21:35:18.257799 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 25 21:35:18 crc kubenswrapper[4910]: I1125 21:35:18.543793 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 25 21:35:18 crc kubenswrapper[4910]: I1125 21:35:18.867501 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 25 21:35:19 crc kubenswrapper[4910]: I1125 21:35:19.547158 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 25 21:35:19 crc kubenswrapper[4910]: I1125 21:35:19.583459 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 25 21:35:19 crc kubenswrapper[4910]: I1125 21:35:19.663026 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 25 21:35:19 crc kubenswrapper[4910]: I1125 21:35:19.729118 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 25 21:35:19 crc kubenswrapper[4910]: I1125 21:35:19.802734 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.131826 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.163502 4910 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-multus"/"multus-daemon-config" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.259101 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.494468 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.599275 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.599664 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.652027 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.656698 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.664440 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.733697 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.785915 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.831128 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 25 21:35:20 crc kubenswrapper[4910]: I1125 21:35:20.861543 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 25 21:35:21 crc kubenswrapper[4910]: I1125 21:35:21.021470 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 25 21:35:21 crc kubenswrapper[4910]: I1125 21:35:21.049402 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 25 21:35:21 crc kubenswrapper[4910]: I1125 21:35:21.112881 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 25 21:35:21 crc kubenswrapper[4910]: I1125 21:35:21.310874 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 25 21:35:21 crc kubenswrapper[4910]: I1125 21:35:21.328599 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 25 21:35:21 crc kubenswrapper[4910]: I1125 21:35:21.397657 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 25 21:35:21 crc kubenswrapper[4910]: I1125 21:35:21.508938 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 25 21:35:21 crc kubenswrapper[4910]: I1125 21:35:21.652676 4910 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 25 21:35:21 crc kubenswrapper[4910]: I1125 21:35:21.802165 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 25 21:35:21 crc kubenswrapper[4910]: I1125 21:35:21.854472 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.041039 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.105803 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.197899 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.235103 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.340363 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.410231 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.618723 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.719579 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.776891 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.873846 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.892540 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.924812 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.961220 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 25 21:35:22 crc kubenswrapper[4910]: I1125 21:35:22.998557 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.007624 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.029369 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.030379 4910 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.054204 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.070106 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.186147 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.241082 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.269633 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.278057 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.285109 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.289716 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.293729 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.312326 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.418677 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.434563 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.461534 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.480397 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.481670 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.537505 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.594556 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.629218 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.630677 4910 patch_prober.go:28] interesting 
pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.630776 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.630857 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.631990 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"415448337e9e6f5aeb7f85028d3f0c105eee5d9e37354565423e9e008bdb89c6"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.632208 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://415448337e9e6f5aeb7f85028d3f0c105eee5d9e37354565423e9e008bdb89c6" gracePeriod=30 Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.663894 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.695223 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.738558 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.753160 4910 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.762758 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.779793 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.812895 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 25 21:35:23 crc kubenswrapper[4910]: I1125 21:35:23.997936 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.042623 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.056326 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 25 
21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.140718 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.146268 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.165941 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.226555 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.265655 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.300066 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.480887 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.486236 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.498606 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.534913 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.573283 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.656805 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.704393 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.715170 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.730547 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.746817 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.757306 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.771830 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 25 21:35:24 crc kubenswrapper[4910]: I1125 21:35:24.944339 4910 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.052391 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.082025 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.183839 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.226798 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.309922 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.323521 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.345998 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.359322 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.363053 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.419838 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.466928 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.555201 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.576440 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.738649 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.884167 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 25 21:35:25 crc kubenswrapper[4910]: I1125 21:35:25.981426 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.011192 4910 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.015677 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 
21:35:26.031551 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.033574 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.130693 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.162538 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.165474 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.180319 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.267380 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.365319 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.426569 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.560377 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.575121 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.747205 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.814492 4910 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.815568 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-776qn" podStartSLOduration=41.349576979 podStartE2EDuration="44.815555287s" podCreationTimestamp="2025-11-25 21:34:42 +0000 UTC" firstStartedPulling="2025-11-25 21:34:44.334077875 +0000 UTC m=+239.796554207" lastFinishedPulling="2025-11-25 21:34:47.800056193 +0000 UTC m=+243.262532515" observedRunningTime="2025-11-25 21:35:07.562999139 +0000 UTC m=+263.025475461" watchObservedRunningTime="2025-11-25 21:35:26.815555287 +0000 UTC m=+282.278031609" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.816700 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=39.816692474999996 podStartE2EDuration="39.816692475s" podCreationTimestamp="2025-11-25 21:34:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:35:07.516937349 +0000 UTC m=+262.979413671" watchObservedRunningTime="2025-11-25 21:35:26.816692475 
+0000 UTC m=+282.279168797" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.817126 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-7h25w" podStartSLOduration=42.318629859 podStartE2EDuration="46.817109855s" podCreationTimestamp="2025-11-25 21:34:40 +0000 UTC" firstStartedPulling="2025-11-25 21:34:41.313172709 +0000 UTC m=+236.775649031" lastFinishedPulling="2025-11-25 21:34:45.811652705 +0000 UTC m=+241.274129027" observedRunningTime="2025-11-25 21:35:07.502406327 +0000 UTC m=+262.964882639" watchObservedRunningTime="2025-11-25 21:35:26.817109855 +0000 UTC m=+282.279586177" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.817771 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-drk5r" podStartSLOduration=42.24969772 podStartE2EDuration="43.817765612s" podCreationTimestamp="2025-11-25 21:34:43 +0000 UTC" firstStartedPulling="2025-11-25 21:34:44.333493969 +0000 UTC m=+239.795970281" lastFinishedPulling="2025-11-25 21:34:45.901561851 +0000 UTC m=+241.364038173" observedRunningTime="2025-11-25 21:35:07.575207374 +0000 UTC m=+263.037683686" watchObservedRunningTime="2025-11-25 21:35:26.817765612 +0000 UTC m=+282.280241934" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.818842 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.818877 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.832486 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.844207 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=19.844188551 podStartE2EDuration="19.844188551s" podCreationTimestamp="2025-11-25 21:35:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:35:26.843479694 +0000 UTC m=+282.305956016" watchObservedRunningTime="2025-11-25 21:35:26.844188551 +0000 UTC m=+282.306664893" Nov 25 21:35:26 crc kubenswrapper[4910]: I1125 21:35:26.977589 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.025166 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.064126 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.083856 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.101130 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.105621 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.154394 4910 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.167056 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.202334 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.558088 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.558443 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.576705 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.581956 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.597631 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.637980 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.669510 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.683661 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.721435 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.752861 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.797088 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.828192 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.933420 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.965399 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Nov 25 21:35:27 crc kubenswrapper[4910]: I1125 21:35:27.977010 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.014331 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.029959 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.081053 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.090154 4910 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.138976 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.165938 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.224890 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.260258 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.262279 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.271385 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.339214 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.466068 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.495520 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.655236 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.694091 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.695033 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.771724 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.809909 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.839346 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.856959 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.910152 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.944390 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 25 21:35:28 crc kubenswrapper[4910]: I1125 21:35:28.962002 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.009193 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.170044 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.219447 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.255956 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.379375 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.456803 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.458013 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.498317 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.500149 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.515529 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.543523 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.621165 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.657058 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.773722 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.784508 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.864606 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.915581 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.975394 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 25 21:35:29 crc kubenswrapper[4910]: I1125 21:35:29.976589 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.044022 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.109867 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert"
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.177004 4910 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"]
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.177305 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://e8a38f416fa5b869b705845412ac538afc1107b69060a972df5b6e5dde0a10ee" gracePeriod=5
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.241769 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.272261 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key"
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.302800 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c"
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.306096 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.388192 4910 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.517681 4910 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.520620 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.577767 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config"
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.650992 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.883425 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 25 21:35:30 crc kubenswrapper[4910]: I1125 21:35:30.893711 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.016668 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.071031 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.144522 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.169347 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.213800 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.229959 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.232072 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.325483 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.528813 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.636919 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.676974 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.725002 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.734878 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.784577 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.854462 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.900958 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.922661 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.958214 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt"
Nov 25 21:35:31 crc kubenswrapper[4910]: I1125 21:35:31.994488 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq"
Nov 25 21:35:32 crc kubenswrapper[4910]: I1125 21:35:32.053322 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy"
Nov 25 21:35:32 crc kubenswrapper[4910]: I1125 21:35:32.278548 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 25 21:35:32 crc kubenswrapper[4910]: I1125 21:35:32.391687 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Nov 25 21:35:32 crc kubenswrapper[4910]: I1125 21:35:32.403838 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt"
Nov 25 21:35:32 crc kubenswrapper[4910]: I1125 21:35:32.508388 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 25 21:35:32 crc kubenswrapper[4910]: I1125 21:35:32.523672 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 25 21:35:32 crc kubenswrapper[4910]: I1125 21:35:32.524511 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 25 21:35:32 crc kubenswrapper[4910]: I1125 21:35:32.529573 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 25 21:35:32 crc kubenswrapper[4910]: I1125 21:35:32.705536 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Nov 25 21:35:32 crc kubenswrapper[4910]: I1125 21:35:32.867093 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Nov 25 21:35:32 crc kubenswrapper[4910]: I1125 21:35:32.995005 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls"
Nov 25 21:35:33 crc kubenswrapper[4910]: I1125 21:35:33.123824 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 25 21:35:33 crc kubenswrapper[4910]: I1125 21:35:33.213294 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Nov 25 21:35:33 crc kubenswrapper[4910]: I1125 21:35:33.266132 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 25 21:35:33 crc kubenswrapper[4910]: I1125 21:35:33.408983 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj"
Nov 25 21:35:33 crc kubenswrapper[4910]: I1125 21:35:33.792927 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 25 21:35:33 crc kubenswrapper[4910]: I1125 21:35:33.812541 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 25 21:35:34 crc kubenswrapper[4910]: I1125 21:35:34.056839 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 25 21:35:34 crc kubenswrapper[4910]: I1125 21:35:34.068900 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 25 21:35:34 crc kubenswrapper[4910]: I1125 21:35:34.401299 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 25 21:35:34 crc kubenswrapper[4910]: I1125 21:35:34.814980 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 25 21:35:34 crc kubenswrapper[4910]: I1125 21:35:34.928742 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib"
Nov 25 21:35:34 crc kubenswrapper[4910]: I1125 21:35:34.955502 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.020829 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.180114 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert"
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.377543 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.723289 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.723346 4910 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="e8a38f416fa5b869b705845412ac538afc1107b69060a972df5b6e5dde0a10ee" exitCode=137
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.723391 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ce79fd3dbeeb93414df4cd2cc2c493f8fc82b7206ff1e1abe9c05299f1088369"
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.779764 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.779878 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.898843 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.898961 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.899010 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.899032 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.899070 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.899105 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.899141 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.899309 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.899461 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.899717 4910 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.899747 4910 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\""
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.899764 4910 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\""
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.899781 4910 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\""
Nov 25 21:35:35 crc kubenswrapper[4910]: I1125 21:35:35.910093 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 25 21:35:36 crc kubenswrapper[4910]: I1125 21:35:36.000551 4910 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\""
Nov 25 21:35:36 crc kubenswrapper[4910]: I1125 21:35:36.022616 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 25 21:35:37 crc kubenswrapper[4910]: I1125 21:35:37.210115 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 25 21:35:37 crc kubenswrapper[4910]: I1125 21:35:37.210369 4910 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 25 21:35:37 crc kubenswrapper[4910]: I1125 21:35:37.220385 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 21:35:37 crc kubenswrapper[4910]: I1125 21:35:37.220428 4910 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="151f7729-a770-47ea-a2c4-12b549301d9c" Nov 25 21:35:37 crc kubenswrapper[4910]: I1125 21:35:37.223952 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 25 21:35:37 crc kubenswrapper[4910]: I1125 21:35:37.223975 4910 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="151f7729-a770-47ea-a2c4-12b549301d9c" Nov 25 21:35:44 crc kubenswrapper[4910]: I1125 21:35:44.963234 4910 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Nov 25 21:35:53 crc kubenswrapper[4910]: I1125 21:35:53.825626 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 25 21:35:53 crc kubenswrapper[4910]: I1125 21:35:53.827774 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 25 21:35:53 crc kubenswrapper[4910]: I1125 21:35:53.827817 4910 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="415448337e9e6f5aeb7f85028d3f0c105eee5d9e37354565423e9e008bdb89c6" exitCode=137 Nov 25 21:35:53 crc kubenswrapper[4910]: I1125 21:35:53.827848 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"415448337e9e6f5aeb7f85028d3f0c105eee5d9e37354565423e9e008bdb89c6"} Nov 25 21:35:53 crc kubenswrapper[4910]: I1125 21:35:53.827879 4910 scope.go:117] "RemoveContainer" containerID="1b2f9b9f4294249e03255b2dc37ff18da74c49cbf86b9ea4d69683dbe0056c76" Nov 25 21:35:54 crc kubenswrapper[4910]: I1125 21:35:54.837486 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Nov 25 21:35:54 crc kubenswrapper[4910]: I1125 21:35:54.838778 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"d41f5146eca70aca2f9d57a250b12cd5f11f0e071bb69ef077e44c9f2d7abe0c"} Nov 25 21:35:57 crc kubenswrapper[4910]: I1125 21:35:57.968290 4910 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:36:03 crc kubenswrapper[4910]: I1125 21:36:03.630472 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:36:03 crc kubenswrapper[4910]: I1125 21:36:03.638061 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:36:07 crc kubenswrapper[4910]: I1125 21:36:07.972236 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 25 21:36:13 crc kubenswrapper[4910]: I1125 21:36:13.796790 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-swtsr"] Nov 25 21:36:13 crc kubenswrapper[4910]: I1125 21:36:13.797531 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" podUID="3e82986a-2957-4450-b122-a47b6d65fd63" containerName="controller-manager" containerID="cri-o://cdeac325c78af82e127759ce1fd8fd82b773689aeffb0af05674b990a4d8e74a" gracePeriod=30 Nov 25 21:36:13 crc kubenswrapper[4910]: I1125 21:36:13.801566 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk"] Nov 25 21:36:13 crc kubenswrapper[4910]: I1125 21:36:13.801802 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" podUID="8d4a0088-93eb-4841-84dd-052dc087ab13" containerName="route-controller-manager" containerID="cri-o://deb6d0cbe60bd32f025220468c07c8a20f5526469e621d8e97b52fda76d90e1d" gracePeriod=30 Nov 25 21:36:13 crc kubenswrapper[4910]: I1125 21:36:13.953334 4910 generic.go:334] "Generic (PLEG): container finished" podID="3e82986a-2957-4450-b122-a47b6d65fd63" containerID="cdeac325c78af82e127759ce1fd8fd82b773689aeffb0af05674b990a4d8e74a" exitCode=0 Nov 25 21:36:13 crc kubenswrapper[4910]: I1125 21:36:13.953409 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" event={"ID":"3e82986a-2957-4450-b122-a47b6d65fd63","Type":"ContainerDied","Data":"cdeac325c78af82e127759ce1fd8fd82b773689aeffb0af05674b990a4d8e74a"} Nov 25 21:36:13 crc kubenswrapper[4910]: I1125 21:36:13.958191 4910 generic.go:334] "Generic (PLEG): container finished" podID="8d4a0088-93eb-4841-84dd-052dc087ab13" containerID="deb6d0cbe60bd32f025220468c07c8a20f5526469e621d8e97b52fda76d90e1d" exitCode=0 Nov 25 21:36:13 crc kubenswrapper[4910]: I1125 21:36:13.958272 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" event={"ID":"8d4a0088-93eb-4841-84dd-052dc087ab13","Type":"ContainerDied","Data":"deb6d0cbe60bd32f025220468c07c8a20f5526469e621d8e97b52fda76d90e1d"} Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.167783 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.203960 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-config\") pod \"3e82986a-2957-4450-b122-a47b6d65fd63\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.203999 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e82986a-2957-4450-b122-a47b6d65fd63-serving-cert\") pod \"3e82986a-2957-4450-b122-a47b6d65fd63\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.204025 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-proxy-ca-bundles\") pod \"3e82986a-2957-4450-b122-a47b6d65fd63\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.204076 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g4cmj\" (UniqueName: \"kubernetes.io/projected/3e82986a-2957-4450-b122-a47b6d65fd63-kube-api-access-g4cmj\") pod \"3e82986a-2957-4450-b122-a47b6d65fd63\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.204109 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-client-ca\") pod \"3e82986a-2957-4450-b122-a47b6d65fd63\" (UID: \"3e82986a-2957-4450-b122-a47b6d65fd63\") " Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.204829 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-client-ca" (OuterVolumeSpecName: "client-ca") pod "3e82986a-2957-4450-b122-a47b6d65fd63" (UID: "3e82986a-2957-4450-b122-a47b6d65fd63"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.204862 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "3e82986a-2957-4450-b122-a47b6d65fd63" (UID: "3e82986a-2957-4450-b122-a47b6d65fd63"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.204889 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-config" (OuterVolumeSpecName: "config") pod "3e82986a-2957-4450-b122-a47b6d65fd63" (UID: "3e82986a-2957-4450-b122-a47b6d65fd63"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.210091 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e82986a-2957-4450-b122-a47b6d65fd63-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3e82986a-2957-4450-b122-a47b6d65fd63" (UID: "3e82986a-2957-4450-b122-a47b6d65fd63"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.210594 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e82986a-2957-4450-b122-a47b6d65fd63-kube-api-access-g4cmj" (OuterVolumeSpecName: "kube-api-access-g4cmj") pod "3e82986a-2957-4450-b122-a47b6d65fd63" (UID: "3e82986a-2957-4450-b122-a47b6d65fd63"). InnerVolumeSpecName "kube-api-access-g4cmj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.237649 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.304807 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-client-ca\") pod \"8d4a0088-93eb-4841-84dd-052dc087ab13\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.304878 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-config\") pod \"8d4a0088-93eb-4841-84dd-052dc087ab13\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.304910 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d4a0088-93eb-4841-84dd-052dc087ab13-serving-cert\") pod \"8d4a0088-93eb-4841-84dd-052dc087ab13\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.304951 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msldt\" (UniqueName: \"kubernetes.io/projected/8d4a0088-93eb-4841-84dd-052dc087ab13-kube-api-access-msldt\") pod \"8d4a0088-93eb-4841-84dd-052dc087ab13\" (UID: \"8d4a0088-93eb-4841-84dd-052dc087ab13\") " Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.305130 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.305141 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3e82986a-2957-4450-b122-a47b6d65fd63-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.305152 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.305162 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g4cmj\" (UniqueName: \"kubernetes.io/projected/3e82986a-2957-4450-b122-a47b6d65fd63-kube-api-access-g4cmj\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.305170 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3e82986a-2957-4450-b122-a47b6d65fd63-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.306205 4910 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-config" (OuterVolumeSpecName: "config") pod "8d4a0088-93eb-4841-84dd-052dc087ab13" (UID: "8d4a0088-93eb-4841-84dd-052dc087ab13"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.306537 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-client-ca" (OuterVolumeSpecName: "client-ca") pod "8d4a0088-93eb-4841-84dd-052dc087ab13" (UID: "8d4a0088-93eb-4841-84dd-052dc087ab13"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.308303 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8d4a0088-93eb-4841-84dd-052dc087ab13-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8d4a0088-93eb-4841-84dd-052dc087ab13" (UID: "8d4a0088-93eb-4841-84dd-052dc087ab13"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.308376 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d4a0088-93eb-4841-84dd-052dc087ab13-kube-api-access-msldt" (OuterVolumeSpecName: "kube-api-access-msldt") pod "8d4a0088-93eb-4841-84dd-052dc087ab13" (UID: "8d4a0088-93eb-4841-84dd-052dc087ab13"). InnerVolumeSpecName "kube-api-access-msldt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.407019 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msldt\" (UniqueName: \"kubernetes.io/projected/8d4a0088-93eb-4841-84dd-052dc087ab13-kube-api-access-msldt\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.407056 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.407066 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8d4a0088-93eb-4841-84dd-052dc087ab13-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.407074 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8d4a0088-93eb-4841-84dd-052dc087ab13-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.937657 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j"] Nov 25 21:36:14 crc kubenswrapper[4910]: E1125 21:36:14.937920 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d4a0088-93eb-4841-84dd-052dc087ab13" containerName="route-controller-manager" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.937935 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d4a0088-93eb-4841-84dd-052dc087ab13" containerName="route-controller-manager" Nov 25 21:36:14 crc kubenswrapper[4910]: E1125 21:36:14.937951 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" containerName="installer" Nov 25 21:36:14 crc 
kubenswrapper[4910]: I1125 21:36:14.937958 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" containerName="installer" Nov 25 21:36:14 crc kubenswrapper[4910]: E1125 21:36:14.937977 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e82986a-2957-4450-b122-a47b6d65fd63" containerName="controller-manager" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.937986 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e82986a-2957-4450-b122-a47b6d65fd63" containerName="controller-manager" Nov 25 21:36:14 crc kubenswrapper[4910]: E1125 21:36:14.937997 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.938003 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.938111 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d4a0088-93eb-4841-84dd-052dc087ab13" containerName="route-controller-manager" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.938123 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.938133 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e82986a-2957-4450-b122-a47b6d65fd63" containerName="controller-manager" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.938142 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1790a245-602a-4fe6-b17d-61c4af8359d1" containerName="installer" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.938637 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.940639 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8"] Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.941112 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.966329 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8"] Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.968259 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" event={"ID":"8d4a0088-93eb-4841-84dd-052dc087ab13","Type":"ContainerDied","Data":"551f70deba949e43ab2d5151e4dcf17505b4c033b311b2fb7010183f016ebbcd"} Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.968302 4910 scope.go:117] "RemoveContainer" containerID="deb6d0cbe60bd32f025220468c07c8a20f5526469e621d8e97b52fda76d90e1d" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.968436 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk" Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.983481 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j"] Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.986002 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" event={"ID":"3e82986a-2957-4450-b122-a47b6d65fd63","Type":"ContainerDied","Data":"b13921a5deefcf00dd84c2beafe87dc5871139d67a6e7d32148714a35309d716"} Nov 25 21:36:14 crc kubenswrapper[4910]: I1125 21:36:14.986075 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-swtsr" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.010708 4910 scope.go:117] "RemoveContainer" containerID="cdeac325c78af82e127759ce1fd8fd82b773689aeffb0af05674b990a4d8e74a" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.015656 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnwgj\" (UniqueName: \"kubernetes.io/projected/ba26bbc8-35c0-49b2-b118-12629c946524-kube-api-access-wnwgj\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.015736 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-client-ca\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.015769 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4547ffea-37ba-47ba-a774-aa3aa6d47916-serving-cert\") pod \"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.015796 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba26bbc8-35c0-49b2-b118-12629c946524-serving-cert\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.015820 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gm6sz\" (UniqueName: \"kubernetes.io/projected/4547ffea-37ba-47ba-a774-aa3aa6d47916-kube-api-access-gm6sz\") pod \"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.015843 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-config\") pod 
\"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.015871 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-client-ca\") pod \"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.015897 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-proxy-ca-bundles\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.015930 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-config\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.021286 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk"] Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.027543 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-2vxgk"] Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.033221 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-swtsr"] Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.037703 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-swtsr"] Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.117431 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-config\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.117545 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnwgj\" (UniqueName: \"kubernetes.io/projected/ba26bbc8-35c0-49b2-b118-12629c946524-kube-api-access-wnwgj\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.117624 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-client-ca\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc 
kubenswrapper[4910]: I1125 21:36:15.117661 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4547ffea-37ba-47ba-a774-aa3aa6d47916-serving-cert\") pod \"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.117699 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba26bbc8-35c0-49b2-b118-12629c946524-serving-cert\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.117730 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-config\") pod \"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.117754 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gm6sz\" (UniqueName: \"kubernetes.io/projected/4547ffea-37ba-47ba-a774-aa3aa6d47916-kube-api-access-gm6sz\") pod \"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.117787 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-client-ca\") pod \"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.117814 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-proxy-ca-bundles\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.119027 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-client-ca\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.119069 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-config\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.119342 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-proxy-ca-bundles\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.120216 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-client-ca\") pod \"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.120888 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-config\") pod \"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.128400 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba26bbc8-35c0-49b2-b118-12629c946524-serving-cert\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.139326 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4547ffea-37ba-47ba-a774-aa3aa6d47916-serving-cert\") pod \"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.140065 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gm6sz\" (UniqueName: \"kubernetes.io/projected/4547ffea-37ba-47ba-a774-aa3aa6d47916-kube-api-access-gm6sz\") pod \"route-controller-manager-7b895dcf8-7hgk8\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.140364 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnwgj\" (UniqueName: \"kubernetes.io/projected/ba26bbc8-35c0-49b2-b118-12629c946524-kube-api-access-wnwgj\") pod \"controller-manager-66cc94c8b8-lgk4j\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.210348 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e82986a-2957-4450-b122-a47b6d65fd63" path="/var/lib/kubelet/pods/3e82986a-2957-4450-b122-a47b6d65fd63/volumes" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.211379 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d4a0088-93eb-4841-84dd-052dc087ab13" path="/var/lib/kubelet/pods/8d4a0088-93eb-4841-84dd-052dc087ab13/volumes" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.287649 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.308731 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.476849 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j"] Nov 25 21:36:15 crc kubenswrapper[4910]: W1125 21:36:15.480608 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba26bbc8_35c0_49b2_b118_12629c946524.slice/crio-8e0681e4c4e2e9a110092b8592a015f9d6e25ae9410516cf587c612c22af2710 WatchSource:0}: Error finding container 8e0681e4c4e2e9a110092b8592a015f9d6e25ae9410516cf587c612c22af2710: Status 404 returned error can't find the container with id 8e0681e4c4e2e9a110092b8592a015f9d6e25ae9410516cf587c612c22af2710 Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.552667 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8"] Nov 25 21:36:15 crc kubenswrapper[4910]: W1125 21:36:15.579558 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4547ffea_37ba_47ba_a774_aa3aa6d47916.slice/crio-f21365e36a9d8f76a12c2209e07bee8bd00db1a30cf99f2ca9a6b28aa663e4c7 WatchSource:0}: Error finding container f21365e36a9d8f76a12c2209e07bee8bd00db1a30cf99f2ca9a6b28aa663e4c7: Status 404 returned error can't find the container with id f21365e36a9d8f76a12c2209e07bee8bd00db1a30cf99f2ca9a6b28aa663e4c7 Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.992192 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" event={"ID":"4547ffea-37ba-47ba-a774-aa3aa6d47916","Type":"ContainerStarted","Data":"be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f"} Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.992678 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.992695 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" event={"ID":"4547ffea-37ba-47ba-a774-aa3aa6d47916","Type":"ContainerStarted","Data":"f21365e36a9d8f76a12c2209e07bee8bd00db1a30cf99f2ca9a6b28aa663e4c7"} Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.995966 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" event={"ID":"ba26bbc8-35c0-49b2-b118-12629c946524","Type":"ContainerStarted","Data":"1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee"} Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.996012 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" event={"ID":"ba26bbc8-35c0-49b2-b118-12629c946524","Type":"ContainerStarted","Data":"8e0681e4c4e2e9a110092b8592a015f9d6e25ae9410516cf587c612c22af2710"} Nov 25 21:36:15 crc kubenswrapper[4910]: I1125 21:36:15.996235 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:16 crc kubenswrapper[4910]: I1125 21:36:16.000055 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:16 crc kubenswrapper[4910]: I1125 21:36:16.010356 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" podStartSLOduration=3.010341057 podStartE2EDuration="3.010341057s" podCreationTimestamp="2025-11-25 21:36:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:36:16.00919124 +0000 UTC m=+331.471667562" watchObservedRunningTime="2025-11-25 21:36:16.010341057 +0000 UTC m=+331.472817369" Nov 25 21:36:16 crc kubenswrapper[4910]: I1125 21:36:16.024749 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" podStartSLOduration=3.024730738 podStartE2EDuration="3.024730738s" podCreationTimestamp="2025-11-25 21:36:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:36:16.022288451 +0000 UTC m=+331.484764773" watchObservedRunningTime="2025-11-25 21:36:16.024730738 +0000 UTC m=+331.487207060" Nov 25 21:36:16 crc kubenswrapper[4910]: I1125 21:36:16.101001 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.042280 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wlfgp"] Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.043116 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.087347 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wlfgp"] Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.159236 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-registry-tls\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.159314 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-bound-sa-token\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.159344 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2bpbw\" (UniqueName: \"kubernetes.io/projected/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-kube-api-access-2bpbw\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.159473 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-trusted-ca\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.159569 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.159603 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-installation-pull-secrets\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.159636 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.159859 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-registry-certificates\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.251980 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.261721 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-installation-pull-secrets\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.262004 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-registry-certificates\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.262109 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-registry-tls\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.262233 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-bound-sa-token\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.262354 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2bpbw\" (UniqueName: \"kubernetes.io/projected/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-kube-api-access-2bpbw\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.262452 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-trusted-ca\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.262545 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.263286 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-registry-certificates\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.263293 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.264463 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-trusted-ca\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.268367 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-registry-tls\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.268490 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-installation-pull-secrets\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.281216 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2bpbw\" (UniqueName: \"kubernetes.io/projected/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-kube-api-access-2bpbw\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.284258 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/da6f1115-7a5e-48c6-b73b-941e57b4b9e2-bound-sa-token\") pod \"image-registry-66df7c8f76-wlfgp\" (UID: \"da6f1115-7a5e-48c6-b73b-941e57b4b9e2\") " pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.360500 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:17 crc kubenswrapper[4910]: I1125 21:36:17.561590 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wlfgp"] Nov 25 21:36:17 crc kubenswrapper[4910]: W1125 21:36:17.572577 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda6f1115_7a5e_48c6_b73b_941e57b4b9e2.slice/crio-ec6a98dbf2eebc65ecc9b8de4c0b379f3dccb3d5d964927419bb7ffc74eaee1f WatchSource:0}: Error finding container ec6a98dbf2eebc65ecc9b8de4c0b379f3dccb3d5d964927419bb7ffc74eaee1f: Status 404 returned error can't find the container with id ec6a98dbf2eebc65ecc9b8de4c0b379f3dccb3d5d964927419bb7ffc74eaee1f Nov 25 21:36:18 crc kubenswrapper[4910]: I1125 21:36:18.006503 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" event={"ID":"da6f1115-7a5e-48c6-b73b-941e57b4b9e2","Type":"ContainerStarted","Data":"6c31cb4076a27e838a76b4d340aa99d91ceba3b2f0b7ecfa46397d5d60ddbf93"} Nov 25 21:36:18 crc kubenswrapper[4910]: I1125 21:36:18.006557 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" event={"ID":"da6f1115-7a5e-48c6-b73b-941e57b4b9e2","Type":"ContainerStarted","Data":"ec6a98dbf2eebc65ecc9b8de4c0b379f3dccb3d5d964927419bb7ffc74eaee1f"} Nov 25 21:36:18 crc kubenswrapper[4910]: I1125 21:36:18.021126 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" podStartSLOduration=1.0211080319999999 podStartE2EDuration="1.021108032s" podCreationTimestamp="2025-11-25 21:36:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:36:18.019832282 +0000 UTC m=+333.482308624" watchObservedRunningTime="2025-11-25 21:36:18.021108032 +0000 UTC m=+333.483584354" Nov 25 21:36:19 crc kubenswrapper[4910]: I1125 21:36:19.013682 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:23 crc kubenswrapper[4910]: I1125 21:36:23.099585 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:36:23 crc kubenswrapper[4910]: I1125 21:36:23.099983 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.221574 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j"] Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.222172 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" podUID="ba26bbc8-35c0-49b2-b118-12629c946524" containerName="controller-manager" 
containerID="cri-o://1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee" gracePeriod=30 Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.245311 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8"] Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.245687 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" podUID="4547ffea-37ba-47ba-a774-aa3aa6d47916" containerName="route-controller-manager" containerID="cri-o://be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f" gracePeriod=30 Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.735321 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.778815 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4547ffea-37ba-47ba-a774-aa3aa6d47916-serving-cert\") pod \"4547ffea-37ba-47ba-a774-aa3aa6d47916\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.778941 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-config\") pod \"4547ffea-37ba-47ba-a774-aa3aa6d47916\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.778963 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-client-ca\") pod \"4547ffea-37ba-47ba-a774-aa3aa6d47916\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.778995 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gm6sz\" (UniqueName: \"kubernetes.io/projected/4547ffea-37ba-47ba-a774-aa3aa6d47916-kube-api-access-gm6sz\") pod \"4547ffea-37ba-47ba-a774-aa3aa6d47916\" (UID: \"4547ffea-37ba-47ba-a774-aa3aa6d47916\") " Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.780738 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-client-ca" (OuterVolumeSpecName: "client-ca") pod "4547ffea-37ba-47ba-a774-aa3aa6d47916" (UID: "4547ffea-37ba-47ba-a774-aa3aa6d47916"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.781688 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-config" (OuterVolumeSpecName: "config") pod "4547ffea-37ba-47ba-a774-aa3aa6d47916" (UID: "4547ffea-37ba-47ba-a774-aa3aa6d47916"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.786076 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4547ffea-37ba-47ba-a774-aa3aa6d47916-kube-api-access-gm6sz" (OuterVolumeSpecName: "kube-api-access-gm6sz") pod "4547ffea-37ba-47ba-a774-aa3aa6d47916" (UID: "4547ffea-37ba-47ba-a774-aa3aa6d47916"). 
InnerVolumeSpecName "kube-api-access-gm6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.801372 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4547ffea-37ba-47ba-a774-aa3aa6d47916-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "4547ffea-37ba-47ba-a774-aa3aa6d47916" (UID: "4547ffea-37ba-47ba-a774-aa3aa6d47916"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.831632 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.881200 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-proxy-ca-bundles\") pod \"ba26bbc8-35c0-49b2-b118-12629c946524\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.881332 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-config\") pod \"ba26bbc8-35c0-49b2-b118-12629c946524\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.881389 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba26bbc8-35c0-49b2-b118-12629c946524-serving-cert\") pod \"ba26bbc8-35c0-49b2-b118-12629c946524\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.881425 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnwgj\" (UniqueName: \"kubernetes.io/projected/ba26bbc8-35c0-49b2-b118-12629c946524-kube-api-access-wnwgj\") pod \"ba26bbc8-35c0-49b2-b118-12629c946524\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.881482 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-client-ca\") pod \"ba26bbc8-35c0-49b2-b118-12629c946524\" (UID: \"ba26bbc8-35c0-49b2-b118-12629c946524\") " Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.881733 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.881754 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/4547ffea-37ba-47ba-a774-aa3aa6d47916-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.881771 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gm6sz\" (UniqueName: \"kubernetes.io/projected/4547ffea-37ba-47ba-a774-aa3aa6d47916-kube-api-access-gm6sz\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.881785 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4547ffea-37ba-47ba-a774-aa3aa6d47916-serving-cert\") on node \"crc\" DevicePath \"\"" 
Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.882208 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "ba26bbc8-35c0-49b2-b118-12629c946524" (UID: "ba26bbc8-35c0-49b2-b118-12629c946524"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.882317 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-client-ca" (OuterVolumeSpecName: "client-ca") pod "ba26bbc8-35c0-49b2-b118-12629c946524" (UID: "ba26bbc8-35c0-49b2-b118-12629c946524"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.882325 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-config" (OuterVolumeSpecName: "config") pod "ba26bbc8-35c0-49b2-b118-12629c946524" (UID: "ba26bbc8-35c0-49b2-b118-12629c946524"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.885360 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba26bbc8-35c0-49b2-b118-12629c946524-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ba26bbc8-35c0-49b2-b118-12629c946524" (UID: "ba26bbc8-35c0-49b2-b118-12629c946524"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.885714 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba26bbc8-35c0-49b2-b118-12629c946524-kube-api-access-wnwgj" (OuterVolumeSpecName: "kube-api-access-wnwgj") pod "ba26bbc8-35c0-49b2-b118-12629c946524" (UID: "ba26bbc8-35c0-49b2-b118-12629c946524"). InnerVolumeSpecName "kube-api-access-wnwgj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.982938 4910 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.982996 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.983010 4910 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ba26bbc8-35c0-49b2-b118-12629c946524-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.983021 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnwgj\" (UniqueName: \"kubernetes.io/projected/ba26bbc8-35c0-49b2-b118-12629c946524-kube-api-access-wnwgj\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:32 crc kubenswrapper[4910]: I1125 21:36:32.983037 4910 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ba26bbc8-35c0-49b2-b118-12629c946524-client-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.091408 4910 generic.go:334] "Generic (PLEG): container finished" podID="ba26bbc8-35c0-49b2-b118-12629c946524" containerID="1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee" exitCode=0 Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.091461 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" event={"ID":"ba26bbc8-35c0-49b2-b118-12629c946524","Type":"ContainerDied","Data":"1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee"} Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.091509 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" event={"ID":"ba26bbc8-35c0-49b2-b118-12629c946524","Type":"ContainerDied","Data":"8e0681e4c4e2e9a110092b8592a015f9d6e25ae9410516cf587c612c22af2710"} Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.091530 4910 scope.go:117] "RemoveContainer" containerID="1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.091473 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.094016 4910 generic.go:334] "Generic (PLEG): container finished" podID="4547ffea-37ba-47ba-a774-aa3aa6d47916" containerID="be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f" exitCode=0 Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.094054 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" event={"ID":"4547ffea-37ba-47ba-a774-aa3aa6d47916","Type":"ContainerDied","Data":"be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f"} Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.094079 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" event={"ID":"4547ffea-37ba-47ba-a774-aa3aa6d47916","Type":"ContainerDied","Data":"f21365e36a9d8f76a12c2209e07bee8bd00db1a30cf99f2ca9a6b28aa663e4c7"} Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.094173 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.106553 4910 scope.go:117] "RemoveContainer" containerID="1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee" Nov 25 21:36:33 crc kubenswrapper[4910]: E1125 21:36:33.107850 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee\": container with ID starting with 1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee not found: ID does not exist" containerID="1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.107875 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee"} err="failed to get container status \"1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee\": rpc error: code = NotFound desc = could not find container \"1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee\": container with ID starting with 1cdfefec836d53d6328c7cd58b0365f68a02f220da4c21c23497075e5477b6ee not found: ID does not exist" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.107894 4910 scope.go:117] "RemoveContainer" containerID="be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.120414 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j"] Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.129300 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-66cc94c8b8-lgk4j"] Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.129365 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8"] Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.136280 4910 scope.go:117] "RemoveContainer" containerID="be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f" Nov 25 21:36:33 crc kubenswrapper[4910]: E1125 21:36:33.136836 4910 log.go:32] "ContainerStatus from 
runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f\": container with ID starting with be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f not found: ID does not exist" containerID="be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.136877 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f"} err="failed to get container status \"be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f\": rpc error: code = NotFound desc = could not find container \"be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f\": container with ID starting with be1541cbe75bc1e6087a11b10aba80f55e67c9947bcca5a5a57c956e3fd57d8f not found: ID does not exist" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.144626 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7b895dcf8-7hgk8"] Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.210882 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4547ffea-37ba-47ba-a774-aa3aa6d47916" path="/var/lib/kubelet/pods/4547ffea-37ba-47ba-a774-aa3aa6d47916/volumes" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.211399 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba26bbc8-35c0-49b2-b118-12629c946524" path="/var/lib/kubelet/pods/ba26bbc8-35c0-49b2-b118-12629c946524/volumes" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.947782 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m"] Nov 25 21:36:33 crc kubenswrapper[4910]: E1125 21:36:33.948353 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba26bbc8-35c0-49b2-b118-12629c946524" containerName="controller-manager" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.948366 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba26bbc8-35c0-49b2-b118-12629c946524" containerName="controller-manager" Nov 25 21:36:33 crc kubenswrapper[4910]: E1125 21:36:33.948392 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4547ffea-37ba-47ba-a774-aa3aa6d47916" containerName="route-controller-manager" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.948401 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4547ffea-37ba-47ba-a774-aa3aa6d47916" containerName="route-controller-manager" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.948519 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba26bbc8-35c0-49b2-b118-12629c946524" containerName="controller-manager" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.948528 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4547ffea-37ba-47ba-a774-aa3aa6d47916" containerName="route-controller-manager" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.948906 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.950652 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-86996764bd-df5jt"] Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.952151 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.952908 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.953116 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.953202 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.953297 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.953336 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.953405 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.953768 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.954682 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.954828 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.954939 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.955469 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.956874 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.957803 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m"] Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.964091 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 25 21:36:33 crc kubenswrapper[4910]: I1125 21:36:33.969523 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-86996764bd-df5jt"] Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.000207 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/41e69657-7488-463e-ab13-d59707be026f-serving-cert\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.000287 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41e69657-7488-463e-ab13-d59707be026f-config\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.000338 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/41e69657-7488-463e-ab13-d59707be026f-proxy-ca-bundles\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.000413 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-config\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: \"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.000446 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-serving-cert\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: \"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.000474 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-client-ca\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: \"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.000489 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41e69657-7488-463e-ab13-d59707be026f-client-ca\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.000520 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wn9qq\" (UniqueName: \"kubernetes.io/projected/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-kube-api-access-wn9qq\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: \"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.000539 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4h27\" 
(UniqueName: \"kubernetes.io/projected/41e69657-7488-463e-ab13-d59707be026f-kube-api-access-v4h27\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.102494 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-client-ca\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: \"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.102557 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41e69657-7488-463e-ab13-d59707be026f-client-ca\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.102608 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4h27\" (UniqueName: \"kubernetes.io/projected/41e69657-7488-463e-ab13-d59707be026f-kube-api-access-v4h27\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.102644 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wn9qq\" (UniqueName: \"kubernetes.io/projected/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-kube-api-access-wn9qq\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: \"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.102684 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41e69657-7488-463e-ab13-d59707be026f-serving-cert\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.102722 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41e69657-7488-463e-ab13-d59707be026f-config\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.102756 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/41e69657-7488-463e-ab13-d59707be026f-proxy-ca-bundles\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.102789 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-config\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: 
\"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.102822 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-serving-cert\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: \"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.106214 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/41e69657-7488-463e-ab13-d59707be026f-client-ca\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.106332 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/41e69657-7488-463e-ab13-d59707be026f-config\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.107139 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/41e69657-7488-463e-ab13-d59707be026f-proxy-ca-bundles\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.107304 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-config\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: \"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.107859 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-client-ca\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: \"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.109508 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/41e69657-7488-463e-ab13-d59707be026f-serving-cert\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.117126 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-serving-cert\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: \"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.127944 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-v4h27\" (UniqueName: \"kubernetes.io/projected/41e69657-7488-463e-ab13-d59707be026f-kube-api-access-v4h27\") pod \"controller-manager-86996764bd-df5jt\" (UID: \"41e69657-7488-463e-ab13-d59707be026f\") " pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.128561 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wn9qq\" (UniqueName: \"kubernetes.io/projected/b6f3cc75-67e4-46b6-9fb8-05ee48addbfc-kube-api-access-wn9qq\") pod \"route-controller-manager-6d44f7fc68-pzd6m\" (UID: \"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc\") " pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.771738 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:34 crc kubenswrapper[4910]: I1125 21:36:34.776901 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:35 crc kubenswrapper[4910]: I1125 21:36:35.250457 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m"] Nov 25 21:36:35 crc kubenswrapper[4910]: W1125 21:36:35.257157 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb6f3cc75_67e4_46b6_9fb8_05ee48addbfc.slice/crio-97cbf578c23f5651a43c62125cfef8bf45c4e7c38ee3bc40d53e07d5d6e8f1b0 WatchSource:0}: Error finding container 97cbf578c23f5651a43c62125cfef8bf45c4e7c38ee3bc40d53e07d5d6e8f1b0: Status 404 returned error can't find the container with id 97cbf578c23f5651a43c62125cfef8bf45c4e7c38ee3bc40d53e07d5d6e8f1b0 Nov 25 21:36:35 crc kubenswrapper[4910]: I1125 21:36:35.293609 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-86996764bd-df5jt"] Nov 25 21:36:36 crc kubenswrapper[4910]: I1125 21:36:36.119272 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" event={"ID":"41e69657-7488-463e-ab13-d59707be026f","Type":"ContainerStarted","Data":"3eff7bc58381fc840e48054803548e0278d8857594efb91f3f5343ecc8f73511"} Nov 25 21:36:36 crc kubenswrapper[4910]: I1125 21:36:36.119556 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" event={"ID":"41e69657-7488-463e-ab13-d59707be026f","Type":"ContainerStarted","Data":"ee92f94494000a849ab4ae144462cfc58f0ad623f13872826a4982609519944e"} Nov 25 21:36:36 crc kubenswrapper[4910]: I1125 21:36:36.120545 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:36 crc kubenswrapper[4910]: I1125 21:36:36.121775 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" event={"ID":"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc","Type":"ContainerStarted","Data":"7fb0c8cb4e3e41e68b193a0313da1c2cc5568eac69d105fbde54ee82343e72c8"} Nov 25 21:36:36 crc kubenswrapper[4910]: I1125 21:36:36.121802 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" event={"ID":"b6f3cc75-67e4-46b6-9fb8-05ee48addbfc","Type":"ContainerStarted","Data":"97cbf578c23f5651a43c62125cfef8bf45c4e7c38ee3bc40d53e07d5d6e8f1b0"} Nov 25 21:36:36 crc kubenswrapper[4910]: I1125 21:36:36.122233 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:36 crc kubenswrapper[4910]: I1125 21:36:36.126101 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" Nov 25 21:36:36 crc kubenswrapper[4910]: I1125 21:36:36.129471 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" Nov 25 21:36:36 crc kubenswrapper[4910]: I1125 21:36:36.141169 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-86996764bd-df5jt" podStartSLOduration=4.141151761 podStartE2EDuration="4.141151761s" podCreationTimestamp="2025-11-25 21:36:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:36:36.138225432 +0000 UTC m=+351.600701754" watchObservedRunningTime="2025-11-25 21:36:36.141151761 +0000 UTC m=+351.603628083" Nov 25 21:36:36 crc kubenswrapper[4910]: I1125 21:36:36.164788 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6d44f7fc68-pzd6m" podStartSLOduration=4.164769861 podStartE2EDuration="4.164769861s" podCreationTimestamp="2025-11-25 21:36:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:36:36.164371722 +0000 UTC m=+351.626848044" watchObservedRunningTime="2025-11-25 21:36:36.164769861 +0000 UTC m=+351.627246183" Nov 25 21:36:37 crc kubenswrapper[4910]: I1125 21:36:37.366003 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-wlfgp" Nov 25 21:36:37 crc kubenswrapper[4910]: I1125 21:36:37.445489 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kdgbm"] Nov 25 21:36:53 crc kubenswrapper[4910]: I1125 21:36:53.098861 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:36:53 crc kubenswrapper[4910]: I1125 21:36:53.099687 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:37:02 crc kubenswrapper[4910]: I1125 21:37:02.485733 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" podUID="3f10744e-fb73-4689-979b-59c32ba0ae6a" containerName="registry" 
containerID="cri-o://229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7" gracePeriod=30 Nov 25 21:37:02 crc kubenswrapper[4910]: I1125 21:37:02.952902 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.003685 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-trusted-ca\") pod \"3f10744e-fb73-4689-979b-59c32ba0ae6a\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.003770 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xxpch\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-kube-api-access-xxpch\") pod \"3f10744e-fb73-4689-979b-59c32ba0ae6a\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.003988 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"3f10744e-fb73-4689-979b-59c32ba0ae6a\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.004054 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-tls\") pod \"3f10744e-fb73-4689-979b-59c32ba0ae6a\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.004079 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3f10744e-fb73-4689-979b-59c32ba0ae6a-installation-pull-secrets\") pod \"3f10744e-fb73-4689-979b-59c32ba0ae6a\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.004107 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-bound-sa-token\") pod \"3f10744e-fb73-4689-979b-59c32ba0ae6a\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.004127 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3f10744e-fb73-4689-979b-59c32ba0ae6a-ca-trust-extracted\") pod \"3f10744e-fb73-4689-979b-59c32ba0ae6a\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.004145 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-certificates\") pod \"3f10744e-fb73-4689-979b-59c32ba0ae6a\" (UID: \"3f10744e-fb73-4689-979b-59c32ba0ae6a\") " Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.004959 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "3f10744e-fb73-4689-979b-59c32ba0ae6a" (UID: 
"3f10744e-fb73-4689-979b-59c32ba0ae6a"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.006337 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "3f10744e-fb73-4689-979b-59c32ba0ae6a" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.010287 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3f10744e-fb73-4689-979b-59c32ba0ae6a-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "3f10744e-fb73-4689-979b-59c32ba0ae6a" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.012157 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "3f10744e-fb73-4689-979b-59c32ba0ae6a" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.013197 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "3f10744e-fb73-4689-979b-59c32ba0ae6a" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.015460 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "3f10744e-fb73-4689-979b-59c32ba0ae6a" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.015612 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-kube-api-access-xxpch" (OuterVolumeSpecName: "kube-api-access-xxpch") pod "3f10744e-fb73-4689-979b-59c32ba0ae6a" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a"). InnerVolumeSpecName "kube-api-access-xxpch". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.021293 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f10744e-fb73-4689-979b-59c32ba0ae6a-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "3f10744e-fb73-4689-979b-59c32ba0ae6a" (UID: "3f10744e-fb73-4689-979b-59c32ba0ae6a"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.105088 4910 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.105128 4910 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/3f10744e-fb73-4689-979b-59c32ba0ae6a-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.105139 4910 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.105148 4910 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/3f10744e-fb73-4689-979b-59c32ba0ae6a-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.105157 4910 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.105165 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3f10744e-fb73-4689-979b-59c32ba0ae6a-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.105172 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xxpch\" (UniqueName: \"kubernetes.io/projected/3f10744e-fb73-4689-979b-59c32ba0ae6a-kube-api-access-xxpch\") on node \"crc\" DevicePath \"\"" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.289003 4910 generic.go:334] "Generic (PLEG): container finished" podID="3f10744e-fb73-4689-979b-59c32ba0ae6a" containerID="229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7" exitCode=0 Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.289042 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" event={"ID":"3f10744e-fb73-4689-979b-59c32ba0ae6a","Type":"ContainerDied","Data":"229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7"} Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.289069 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" event={"ID":"3f10744e-fb73-4689-979b-59c32ba0ae6a","Type":"ContainerDied","Data":"e66ff8f6d6148a9a78004bea2eac7d6c36acd6f388edc1e42c7092b64cbce92b"} Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.289069 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-kdgbm" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.289085 4910 scope.go:117] "RemoveContainer" containerID="229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.307185 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kdgbm"] Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.311274 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-kdgbm"] Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.314122 4910 scope.go:117] "RemoveContainer" containerID="229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7" Nov 25 21:37:03 crc kubenswrapper[4910]: E1125 21:37:03.314668 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7\": container with ID starting with 229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7 not found: ID does not exist" containerID="229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7" Nov 25 21:37:03 crc kubenswrapper[4910]: I1125 21:37:03.314702 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7"} err="failed to get container status \"229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7\": rpc error: code = NotFound desc = could not find container \"229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7\": container with ID starting with 229e677fb2d05704ad59d47691b40836baf74c15d43123dd1916bcbfe6cbdfc7 not found: ID does not exist" Nov 25 21:37:05 crc kubenswrapper[4910]: I1125 21:37:05.215150 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3f10744e-fb73-4689-979b-59c32ba0ae6a" path="/var/lib/kubelet/pods/3f10744e-fb73-4689-979b-59c32ba0ae6a/volumes" Nov 25 21:37:23 crc kubenswrapper[4910]: I1125 21:37:23.099206 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:37:23 crc kubenswrapper[4910]: I1125 21:37:23.099775 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:37:23 crc kubenswrapper[4910]: I1125 21:37:23.099831 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:37:23 crc kubenswrapper[4910]: I1125 21:37:23.100856 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5e4a10520c346a1193c3483b8b384d43f0615f88a77b72ac7b42de74d6a3c5d6"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 21:37:23 crc kubenswrapper[4910]: I1125 
21:37:23.100946 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://5e4a10520c346a1193c3483b8b384d43f0615f88a77b72ac7b42de74d6a3c5d6" gracePeriod=600 Nov 25 21:37:23 crc kubenswrapper[4910]: I1125 21:37:23.422175 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="5e4a10520c346a1193c3483b8b384d43f0615f88a77b72ac7b42de74d6a3c5d6" exitCode=0 Nov 25 21:37:23 crc kubenswrapper[4910]: I1125 21:37:23.422223 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"5e4a10520c346a1193c3483b8b384d43f0615f88a77b72ac7b42de74d6a3c5d6"} Nov 25 21:37:23 crc kubenswrapper[4910]: I1125 21:37:23.422284 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"30286bee60e1a7a80129654be478335ca47bde695cc33c16b01fd38ede68a6b8"} Nov 25 21:37:23 crc kubenswrapper[4910]: I1125 21:37:23.422307 4910 scope.go:117] "RemoveContainer" containerID="e68ee0b2cfff156e29e3b28680905755554572cfa59932b7e60c5e077b840547" Nov 25 21:39:23 crc kubenswrapper[4910]: I1125 21:39:23.099025 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:39:23 crc kubenswrapper[4910]: I1125 21:39:23.099685 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:39:53 crc kubenswrapper[4910]: I1125 21:39:53.099587 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:39:53 crc kubenswrapper[4910]: I1125 21:39:53.100324 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.857375 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fxdt9"] Nov 25 21:40:16 crc kubenswrapper[4910]: E1125 21:40:16.858621 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f10744e-fb73-4689-979b-59c32ba0ae6a" containerName="registry" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.858647 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f10744e-fb73-4689-979b-59c32ba0ae6a" containerName="registry" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 
21:40:16.858823 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f10744e-fb73-4689-979b-59c32ba0ae6a" containerName="registry" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.859465 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fxdt9" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.863206 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-kl2mf"] Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.863829 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-kl2mf" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.865092 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.865491 4910 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-kb8xg" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.865686 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.870313 4910 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-fn7rx" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.873862 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fxdt9"] Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.878844 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-78cb6\" (UniqueName: \"kubernetes.io/projected/a1017150-9116-4453-84f8-bc8148ee529e-kube-api-access-78cb6\") pod \"cert-manager-cainjector-7f985d654d-fxdt9\" (UID: \"a1017150-9116-4453-84f8-bc8148ee529e\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fxdt9" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.878919 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5wmwd\" (UniqueName: \"kubernetes.io/projected/949d68d4-09e9-4d53-a0d6-0d667e0c7b09-kube-api-access-5wmwd\") pod \"cert-manager-5b446d88c5-kl2mf\" (UID: \"949d68d4-09e9-4d53-a0d6-0d667e0c7b09\") " pod="cert-manager/cert-manager-5b446d88c5-kl2mf" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.879053 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-kl2mf"] Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.893942 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-xtxpk"] Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.894835 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-xtxpk" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.899684 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-xtxpk"] Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.899884 4910 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-54w5n" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.980703 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5wmwd\" (UniqueName: \"kubernetes.io/projected/949d68d4-09e9-4d53-a0d6-0d667e0c7b09-kube-api-access-5wmwd\") pod \"cert-manager-5b446d88c5-kl2mf\" (UID: \"949d68d4-09e9-4d53-a0d6-0d667e0c7b09\") " pod="cert-manager/cert-manager-5b446d88c5-kl2mf" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.980793 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-78cb6\" (UniqueName: \"kubernetes.io/projected/a1017150-9116-4453-84f8-bc8148ee529e-kube-api-access-78cb6\") pod \"cert-manager-cainjector-7f985d654d-fxdt9\" (UID: \"a1017150-9116-4453-84f8-bc8148ee529e\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fxdt9" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.980848 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8t4n6\" (UniqueName: \"kubernetes.io/projected/ce2757a9-3fa1-4cf5-9ace-bc7cc1922640-kube-api-access-8t4n6\") pod \"cert-manager-webhook-5655c58dd6-xtxpk\" (UID: \"ce2757a9-3fa1-4cf5-9ace-bc7cc1922640\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-xtxpk" Nov 25 21:40:16 crc kubenswrapper[4910]: I1125 21:40:16.999431 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-78cb6\" (UniqueName: \"kubernetes.io/projected/a1017150-9116-4453-84f8-bc8148ee529e-kube-api-access-78cb6\") pod \"cert-manager-cainjector-7f985d654d-fxdt9\" (UID: \"a1017150-9116-4453-84f8-bc8148ee529e\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-fxdt9" Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.010036 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5wmwd\" (UniqueName: \"kubernetes.io/projected/949d68d4-09e9-4d53-a0d6-0d667e0c7b09-kube-api-access-5wmwd\") pod \"cert-manager-5b446d88c5-kl2mf\" (UID: \"949d68d4-09e9-4d53-a0d6-0d667e0c7b09\") " pod="cert-manager/cert-manager-5b446d88c5-kl2mf" Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.082159 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8t4n6\" (UniqueName: \"kubernetes.io/projected/ce2757a9-3fa1-4cf5-9ace-bc7cc1922640-kube-api-access-8t4n6\") pod \"cert-manager-webhook-5655c58dd6-xtxpk\" (UID: \"ce2757a9-3fa1-4cf5-9ace-bc7cc1922640\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-xtxpk" Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.103900 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8t4n6\" (UniqueName: \"kubernetes.io/projected/ce2757a9-3fa1-4cf5-9ace-bc7cc1922640-kube-api-access-8t4n6\") pod \"cert-manager-webhook-5655c58dd6-xtxpk\" (UID: \"ce2757a9-3fa1-4cf5-9ace-bc7cc1922640\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-xtxpk" Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.187833 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-fxdt9" Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.199898 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-kl2mf" Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.210145 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-xtxpk" Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.408821 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-kl2mf"] Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.419851 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.456850 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-fxdt9"] Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.486268 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-xtxpk"] Nov 25 21:40:17 crc kubenswrapper[4910]: W1125 21:40:17.488291 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podce2757a9_3fa1_4cf5_9ace_bc7cc1922640.slice/crio-25b8789b02cf72ff664635a471fdb2de106b916a390c7b20ab625f04e4e737fc WatchSource:0}: Error finding container 25b8789b02cf72ff664635a471fdb2de106b916a390c7b20ab625f04e4e737fc: Status 404 returned error can't find the container with id 25b8789b02cf72ff664635a471fdb2de106b916a390c7b20ab625f04e4e737fc Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.541871 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-xtxpk" event={"ID":"ce2757a9-3fa1-4cf5-9ace-bc7cc1922640","Type":"ContainerStarted","Data":"25b8789b02cf72ff664635a471fdb2de106b916a390c7b20ab625f04e4e737fc"} Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.543265 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-kl2mf" event={"ID":"949d68d4-09e9-4d53-a0d6-0d667e0c7b09","Type":"ContainerStarted","Data":"34afede1993b23c40474641a476d604b135b9a2c8f8fa2c2defc934e72595350"} Nov 25 21:40:17 crc kubenswrapper[4910]: I1125 21:40:17.544066 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fxdt9" event={"ID":"a1017150-9116-4453-84f8-bc8148ee529e","Type":"ContainerStarted","Data":"440c25048139a0ebbd137312ac9533c93bf8c07c177fd0289b0e9a355579fdf5"} Nov 25 21:40:21 crc kubenswrapper[4910]: I1125 21:40:21.568865 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-xtxpk" event={"ID":"ce2757a9-3fa1-4cf5-9ace-bc7cc1922640","Type":"ContainerStarted","Data":"7c0f3e9e91fa6fe1bc54ad73950a3f65609b0ec42d0695d3316b4b61848ae34d"} Nov 25 21:40:21 crc kubenswrapper[4910]: I1125 21:40:21.569581 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-xtxpk" Nov 25 21:40:21 crc kubenswrapper[4910]: I1125 21:40:21.570519 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-kl2mf" event={"ID":"949d68d4-09e9-4d53-a0d6-0d667e0c7b09","Type":"ContainerStarted","Data":"68d53ca6f2b64ca07997b011627bf9f211e768a1165f51b44e11bb5258963b01"} Nov 25 21:40:21 crc kubenswrapper[4910]: I1125 
21:40:21.572051 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-fxdt9" event={"ID":"a1017150-9116-4453-84f8-bc8148ee529e","Type":"ContainerStarted","Data":"8fcabc8d579378b92a2eecd7b647339481ecd00a547ff090c152c2a6b031bd1e"} Nov 25 21:40:21 crc kubenswrapper[4910]: I1125 21:40:21.588987 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-xtxpk" podStartSLOduration=2.28397707 podStartE2EDuration="5.588968601s" podCreationTimestamp="2025-11-25 21:40:16 +0000 UTC" firstStartedPulling="2025-11-25 21:40:17.491081469 +0000 UTC m=+572.953557791" lastFinishedPulling="2025-11-25 21:40:20.79607299 +0000 UTC m=+576.258549322" observedRunningTime="2025-11-25 21:40:21.582039363 +0000 UTC m=+577.044515735" watchObservedRunningTime="2025-11-25 21:40:21.588968601 +0000 UTC m=+577.051444923" Nov 25 21:40:21 crc kubenswrapper[4910]: I1125 21:40:21.597674 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-fxdt9" podStartSLOduration=2.287298705 podStartE2EDuration="5.59765243s" podCreationTimestamp="2025-11-25 21:40:16 +0000 UTC" firstStartedPulling="2025-11-25 21:40:17.463702216 +0000 UTC m=+572.926178538" lastFinishedPulling="2025-11-25 21:40:20.774055931 +0000 UTC m=+576.236532263" observedRunningTime="2025-11-25 21:40:21.593331836 +0000 UTC m=+577.055808168" watchObservedRunningTime="2025-11-25 21:40:21.59765243 +0000 UTC m=+577.060128762" Nov 25 21:40:21 crc kubenswrapper[4910]: I1125 21:40:21.608524 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-kl2mf" podStartSLOduration=2.232223091 podStartE2EDuration="5.60850567s" podCreationTimestamp="2025-11-25 21:40:16 +0000 UTC" firstStartedPulling="2025-11-25 21:40:17.419652847 +0000 UTC m=+572.882129169" lastFinishedPulling="2025-11-25 21:40:20.795935426 +0000 UTC m=+576.258411748" observedRunningTime="2025-11-25 21:40:21.606913544 +0000 UTC m=+577.069389886" watchObservedRunningTime="2025-11-25 21:40:21.60850567 +0000 UTC m=+577.070981992" Nov 25 21:40:23 crc kubenswrapper[4910]: I1125 21:40:23.098998 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:40:23 crc kubenswrapper[4910]: I1125 21:40:23.099078 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:40:23 crc kubenswrapper[4910]: I1125 21:40:23.099136 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:40:23 crc kubenswrapper[4910]: I1125 21:40:23.099757 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"30286bee60e1a7a80129654be478335ca47bde695cc33c16b01fd38ede68a6b8"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be 
restarted" Nov 25 21:40:23 crc kubenswrapper[4910]: I1125 21:40:23.099816 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://30286bee60e1a7a80129654be478335ca47bde695cc33c16b01fd38ede68a6b8" gracePeriod=600 Nov 25 21:40:23 crc kubenswrapper[4910]: I1125 21:40:23.591366 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="30286bee60e1a7a80129654be478335ca47bde695cc33c16b01fd38ede68a6b8" exitCode=0 Nov 25 21:40:23 crc kubenswrapper[4910]: I1125 21:40:23.591448 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"30286bee60e1a7a80129654be478335ca47bde695cc33c16b01fd38ede68a6b8"} Nov 25 21:40:23 crc kubenswrapper[4910]: I1125 21:40:23.592472 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"1d449a51cc6d0f8601906171d97e528f4369d984db9458b4317c75e761fb730e"} Nov 25 21:40:23 crc kubenswrapper[4910]: I1125 21:40:23.592556 4910 scope.go:117] "RemoveContainer" containerID="5e4a10520c346a1193c3483b8b384d43f0615f88a77b72ac7b42de74d6a3c5d6" Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.216059 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-xtxpk" Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.636734 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-cvj2j"] Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.637363 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovn-controller" containerID="cri-o://055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7" gracePeriod=30 Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.637460 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="nbdb" containerID="cri-o://425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326" gracePeriod=30 Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.637578 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="northd" containerID="cri-o://4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544" gracePeriod=30 Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.637628 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="kube-rbac-proxy-node" containerID="cri-o://c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978" gracePeriod=30 Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.637649 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" 
podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovn-acl-logging" containerID="cri-o://6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515" gracePeriod=30 Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.637780 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204" gracePeriod=30 Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.638457 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="sbdb" containerID="cri-o://a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e" gracePeriod=30 Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.696622 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" containerID="cri-o://0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b" gracePeriod=30 Nov 25 21:40:27 crc kubenswrapper[4910]: E1125 21:40:27.769001 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 25 21:40:27 crc kubenswrapper[4910]: E1125 21:40:27.770420 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 25 21:40:27 crc kubenswrapper[4910]: E1125 21:40:27.772540 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 25 21:40:27 crc kubenswrapper[4910]: E1125 21:40:27.773113 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326 is running failed: container process not found" containerID="425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326" cmd=["/bin/bash","-c","set -xeo pipefail\n. 
/ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"nb\"\n"] Nov 25 21:40:27 crc kubenswrapper[4910]: E1125 21:40:27.773359 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326 is running failed: container process not found" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="nbdb" Nov 25 21:40:27 crc kubenswrapper[4910]: E1125 21:40:27.773753 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 25 21:40:27 crc kubenswrapper[4910]: E1125 21:40:27.775508 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e" cmd=["/bin/bash","-c","set -xeo pipefail\n. /ovnkube-lib/ovnkube-lib.sh || exit 1\novndb-readiness-probe \"sb\"\n"] Nov 25 21:40:27 crc kubenswrapper[4910]: E1125 21:40:27.775557 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="sbdb" Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.981841 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/3.log" Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.985034 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovn-acl-logging/0.log" Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.985656 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovn-controller/0.log" Nov 25 21:40:27 crc kubenswrapper[4910]: I1125 21:40:27.986170 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.042850 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-nvbkd"] Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.043364 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="kube-rbac-proxy-node" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.043477 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="kube-rbac-proxy-node" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.043560 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="sbdb" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.043629 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="sbdb" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.043706 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.043784 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.043857 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.043934 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.044004 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="nbdb" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.044076 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="nbdb" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.044156 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="kubecfg-setup" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.044232 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="kubecfg-setup" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.044374 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="northd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.044449 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="northd" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.044525 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.044597 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.044665 4910 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovn-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.044740 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovn-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.044811 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovn-acl-logging" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.044876 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovn-acl-logging" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.044949 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.045030 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.045264 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.045363 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="kube-rbac-proxy-node" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.045439 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovn-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.045511 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.045584 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="sbdb" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.045678 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="kube-rbac-proxy-ovn-metrics" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.045760 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.045830 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovn-acl-logging" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.045907 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="nbdb" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.045982 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.046058 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="northd" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.046235 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 
21:40:28.046346 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.046551 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.046792 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.046901 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" containerName="ovnkube-controller" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.048946 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137432 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-netns\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137503 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-ovn-kubernetes\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137529 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-log-socket\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137563 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-config\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137612 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-ovn\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137638 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-env-overrides\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137676 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-netd\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137704 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-slash\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137732 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ptdrh\" (UniqueName: \"kubernetes.io/projected/4cf48d68-85c8-45e7-8533-550e120eca12-kube-api-access-ptdrh\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137749 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-systemd\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137771 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-etc-openvswitch\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137789 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-node-log\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137813 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-var-lib-openvswitch\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137836 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-var-lib-cni-networks-ovn-kubernetes\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137868 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-systemd-units\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137890 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4cf48d68-85c8-45e7-8533-550e120eca12-ovn-node-metrics-cert\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137917 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-kubelet\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137943 4910 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-script-lib\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137965 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-openvswitch\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.137987 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-bin\") pod \"4cf48d68-85c8-45e7-8533-550e120eca12\" (UID: \"4cf48d68-85c8-45e7-8533-550e120eca12\") " Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138307 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138374 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138402 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138427 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-node-log" (OuterVolumeSpecName: "node-log") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138451 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "var-lib-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138477 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138500 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138554 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138625 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138683 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138655 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-slash" (OuterVolumeSpecName: "host-slash") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138733 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138788 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). 
InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.138821 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-log-socket" (OuterVolumeSpecName: "log-socket") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.139202 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.139277 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.139235 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.146901 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4cf48d68-85c8-45e7-8533-550e120eca12-kube-api-access-ptdrh" (OuterVolumeSpecName: "kube-api-access-ptdrh") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "kube-api-access-ptdrh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.147308 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4cf48d68-85c8-45e7-8533-550e120eca12-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.156342 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "4cf48d68-85c8-45e7-8533-550e120eca12" (UID: "4cf48d68-85c8-45e7-8533-550e120eca12"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.239944 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/11d22b7b-2361-490e-813a-1e23ba18ecb9-ovn-node-metrics-cert\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.240018 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-run-systemd\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.240052 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-run-ovn-kubernetes\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.240076 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.240098 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/11d22b7b-2361-490e-813a-1e23ba18ecb9-ovnkube-script-lib\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.240909 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-var-lib-openvswitch\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.240977 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-cni-netd\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241002 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-cni-bin\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241025 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" 
(UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-run-openvswitch\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241059 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/11d22b7b-2361-490e-813a-1e23ba18ecb9-env-overrides\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241099 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-slash\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241133 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsjp9\" (UniqueName: \"kubernetes.io/projected/11d22b7b-2361-490e-813a-1e23ba18ecb9-kube-api-access-tsjp9\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241156 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-run-netns\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241177 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-etc-openvswitch\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241206 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/11d22b7b-2361-490e-813a-1e23ba18ecb9-ovnkube-config\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241228 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-systemd-units\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241276 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-node-log\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241303 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-kubelet\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241329 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-run-ovn\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241350 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-log-socket\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241402 4910 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241417 4910 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-log-socket\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241431 4910 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241447 4910 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241460 4910 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241473 4910 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241485 4910 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-slash\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241498 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ptdrh\" (UniqueName: \"kubernetes.io/projected/4cf48d68-85c8-45e7-8533-550e120eca12-kube-api-access-ptdrh\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241510 4910 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 
21:40:28.241522 4910 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241533 4910 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-node-log\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241545 4910 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241559 4910 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241573 4910 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241585 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/4cf48d68-85c8-45e7-8533-550e120eca12-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241599 4910 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241611 4910 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/4cf48d68-85c8-45e7-8533-550e120eca12-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241623 4910 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241634 4910 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.241647 4910 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/4cf48d68-85c8-45e7-8533-550e120eca12-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.343008 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-run-ovn\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.343480 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-log-socket\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.343185 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-run-ovn\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.343553 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-log-socket\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.343768 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/11d22b7b-2361-490e-813a-1e23ba18ecb9-ovn-node-metrics-cert\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.343914 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-run-systemd\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.343940 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-run-ovn-kubernetes\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.343961 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.343991 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/11d22b7b-2361-490e-813a-1e23ba18ecb9-ovnkube-script-lib\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344047 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-var-lib-openvswitch\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344049 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-run-systemd\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344094 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-cni-netd\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344049 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-run-ovn-kubernetes\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344115 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-cni-bin\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344144 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-run-openvswitch\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344147 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-var-lib-openvswitch\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344182 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-cni-netd\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344168 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344264 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-run-openvswitch\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344204 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/11d22b7b-2361-490e-813a-1e23ba18ecb9-env-overrides\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344221 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-cni-bin\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344392 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-slash\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344480 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsjp9\" (UniqueName: \"kubernetes.io/projected/11d22b7b-2361-490e-813a-1e23ba18ecb9-kube-api-access-tsjp9\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344510 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-run-netns\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344582 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-etc-openvswitch\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344630 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/11d22b7b-2361-490e-813a-1e23ba18ecb9-ovnkube-config\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344653 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-systemd-units\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344720 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-node-log\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344755 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-kubelet\") pod \"ovnkube-node-nvbkd\" (UID: 
\"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344795 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/11d22b7b-2361-490e-813a-1e23ba18ecb9-env-overrides\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344813 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/11d22b7b-2361-490e-813a-1e23ba18ecb9-ovnkube-script-lib\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344882 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-kubelet\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344910 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-systemd-units\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344904 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-etc-openvswitch\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344931 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-run-netns\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344934 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-host-slash\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.344972 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/11d22b7b-2361-490e-813a-1e23ba18ecb9-node-log\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.345929 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/11d22b7b-2361-490e-813a-1e23ba18ecb9-ovnkube-config\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.352616 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/11d22b7b-2361-490e-813a-1e23ba18ecb9-ovn-node-metrics-cert\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.363304 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsjp9\" (UniqueName: \"kubernetes.io/projected/11d22b7b-2361-490e-813a-1e23ba18ecb9-kube-api-access-tsjp9\") pod \"ovnkube-node-nvbkd\" (UID: \"11d22b7b-2361-490e-813a-1e23ba18ecb9\") " pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.373799 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.643823 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovnkube-controller/3.log" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.646676 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovn-acl-logging/0.log" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647285 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cvj2j_4cf48d68-85c8-45e7-8533-550e120eca12/ovn-controller/0.log" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647619 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b" exitCode=0 Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647647 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e" exitCode=0 Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647655 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326" exitCode=0 Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647663 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544" exitCode=0 Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647669 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204" exitCode=0 Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647675 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978" exitCode=0 Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647682 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" containerID="6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515" exitCode=143 Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647688 4910 generic.go:334] "Generic (PLEG): container finished" podID="4cf48d68-85c8-45e7-8533-550e120eca12" 
containerID="055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7" exitCode=143 Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647724 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647755 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647765 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647774 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647782 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647790 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647805 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647819 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647826 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647832 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647837 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647842 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647847 
4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647852 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647857 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647865 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647873 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647879 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647884 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647888 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647893 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647898 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647903 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647908 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647913 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647917 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647924 
4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647931 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647937 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647941 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647948 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647952 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647957 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647962 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647967 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647972 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647977 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647983 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" event={"ID":"4cf48d68-85c8-45e7-8533-550e120eca12","Type":"ContainerDied","Data":"48afbf381caf236ff73201c7191b44c67b199e9befc0eba5fb283b999dd0ff9b"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647991 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.647997 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.648003 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.648008 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.648014 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.648019 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.648023 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.648028 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.648033 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.648038 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.648054 4910 scope.go:117] "RemoveContainer" containerID="0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.648196 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-cvj2j" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.651268 4910 generic.go:334] "Generic (PLEG): container finished" podID="11d22b7b-2361-490e-813a-1e23ba18ecb9" containerID="02126298560e4373d341ffd63e5041d7c6fe2596f5f1cd5464a83c3c2568c155" exitCode=0 Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.651354 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" event={"ID":"11d22b7b-2361-490e-813a-1e23ba18ecb9","Type":"ContainerDied","Data":"02126298560e4373d341ffd63e5041d7c6fe2596f5f1cd5464a83c3c2568c155"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.651390 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" event={"ID":"11d22b7b-2361-490e-813a-1e23ba18ecb9","Type":"ContainerStarted","Data":"a57365402d91f445e6db5a6b3d4cf44ce796a0bedd62a0f673eb7417b7471a7a"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.657613 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gqjcx_751fe267-dc17-4de7-81e9-a8caab9e9817/kube-multus/2.log" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.658532 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gqjcx_751fe267-dc17-4de7-81e9-a8caab9e9817/kube-multus/1.log" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.658567 4910 generic.go:334] "Generic (PLEG): container finished" podID="751fe267-dc17-4de7-81e9-a8caab9e9817" containerID="ea56b080cad081ce614b04495b79e924097aaedd91ee98cd8bfb6edb241108dd" exitCode=2 Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.658592 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gqjcx" event={"ID":"751fe267-dc17-4de7-81e9-a8caab9e9817","Type":"ContainerDied","Data":"ea56b080cad081ce614b04495b79e924097aaedd91ee98cd8bfb6edb241108dd"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.658609 4910 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660"} Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.658941 4910 scope.go:117] "RemoveContainer" containerID="ea56b080cad081ce614b04495b79e924097aaedd91ee98cd8bfb6edb241108dd" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.659146 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-gqjcx_openshift-multus(751fe267-dc17-4de7-81e9-a8caab9e9817)\"" pod="openshift-multus/multus-gqjcx" podUID="751fe267-dc17-4de7-81e9-a8caab9e9817" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.688912 4910 scope.go:117] "RemoveContainer" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.709546 4910 scope.go:117] "RemoveContainer" containerID="a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.728565 4910 scope.go:117] "RemoveContainer" containerID="425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.737560 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-cvj2j"] Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 
21:40:28.746476 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-cvj2j"] Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.768842 4910 scope.go:117] "RemoveContainer" containerID="4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.810115 4910 scope.go:117] "RemoveContainer" containerID="c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.829623 4910 scope.go:117] "RemoveContainer" containerID="c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.845774 4910 scope.go:117] "RemoveContainer" containerID="6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.858584 4910 scope.go:117] "RemoveContainer" containerID="055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.877390 4910 scope.go:117] "RemoveContainer" containerID="9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.894273 4910 scope.go:117] "RemoveContainer" containerID="0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.894758 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b\": container with ID starting with 0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b not found: ID does not exist" containerID="0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.894835 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b"} err="failed to get container status \"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b\": rpc error: code = NotFound desc = could not find container \"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b\": container with ID starting with 0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.894894 4910 scope.go:117] "RemoveContainer" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.895333 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\": container with ID starting with 4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e not found: ID does not exist" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.895379 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e"} err="failed to get container status \"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\": rpc error: code = NotFound desc = could not find container \"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\": container 
with ID starting with 4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.895404 4910 scope.go:117] "RemoveContainer" containerID="a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.895895 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\": container with ID starting with a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e not found: ID does not exist" containerID="a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.895950 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e"} err="failed to get container status \"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\": rpc error: code = NotFound desc = could not find container \"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\": container with ID starting with a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.895995 4910 scope.go:117] "RemoveContainer" containerID="425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.896352 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\": container with ID starting with 425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326 not found: ID does not exist" containerID="425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.896396 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326"} err="failed to get container status \"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\": rpc error: code = NotFound desc = could not find container \"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\": container with ID starting with 425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.896420 4910 scope.go:117] "RemoveContainer" containerID="4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.896808 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\": container with ID starting with 4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544 not found: ID does not exist" containerID="4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.896837 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544"} err="failed to get container status 
\"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\": rpc error: code = NotFound desc = could not find container \"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\": container with ID starting with 4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.896853 4910 scope.go:117] "RemoveContainer" containerID="c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.897225 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\": container with ID starting with c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204 not found: ID does not exist" containerID="c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.897271 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204"} err="failed to get container status \"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\": rpc error: code = NotFound desc = could not find container \"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\": container with ID starting with c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.897289 4910 scope.go:117] "RemoveContainer" containerID="c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.897538 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\": container with ID starting with c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978 not found: ID does not exist" containerID="c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.897576 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978"} err="failed to get container status \"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\": rpc error: code = NotFound desc = could not find container \"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\": container with ID starting with c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.897606 4910 scope.go:117] "RemoveContainer" containerID="6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.898282 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\": container with ID starting with 6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515 not found: ID does not exist" containerID="6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.898311 4910 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515"} err="failed to get container status \"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\": rpc error: code = NotFound desc = could not find container \"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\": container with ID starting with 6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.898328 4910 scope.go:117] "RemoveContainer" containerID="055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.898614 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\": container with ID starting with 055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7 not found: ID does not exist" containerID="055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.898647 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7"} err="failed to get container status \"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\": rpc error: code = NotFound desc = could not find container \"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\": container with ID starting with 055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.898665 4910 scope.go:117] "RemoveContainer" containerID="9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6" Nov 25 21:40:28 crc kubenswrapper[4910]: E1125 21:40:28.898991 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\": container with ID starting with 9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6 not found: ID does not exist" containerID="9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.899027 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6"} err="failed to get container status \"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\": rpc error: code = NotFound desc = could not find container \"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\": container with ID starting with 9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.899049 4910 scope.go:117] "RemoveContainer" containerID="0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.899403 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b"} err="failed to get container status \"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b\": rpc error: code = NotFound desc = could not find container 
\"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b\": container with ID starting with 0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.899430 4910 scope.go:117] "RemoveContainer" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.899628 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e"} err="failed to get container status \"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\": rpc error: code = NotFound desc = could not find container \"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\": container with ID starting with 4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.899652 4910 scope.go:117] "RemoveContainer" containerID="a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.899991 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e"} err="failed to get container status \"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\": rpc error: code = NotFound desc = could not find container \"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\": container with ID starting with a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.900012 4910 scope.go:117] "RemoveContainer" containerID="425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.900255 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326"} err="failed to get container status \"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\": rpc error: code = NotFound desc = could not find container \"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\": container with ID starting with 425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.900282 4910 scope.go:117] "RemoveContainer" containerID="4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.900563 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544"} err="failed to get container status \"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\": rpc error: code = NotFound desc = could not find container \"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\": container with ID starting with 4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.900587 4910 scope.go:117] "RemoveContainer" containerID="c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.900815 4910 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204"} err="failed to get container status \"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\": rpc error: code = NotFound desc = could not find container \"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\": container with ID starting with c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.900845 4910 scope.go:117] "RemoveContainer" containerID="c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.901058 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978"} err="failed to get container status \"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\": rpc error: code = NotFound desc = could not find container \"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\": container with ID starting with c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.901074 4910 scope.go:117] "RemoveContainer" containerID="6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.901438 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515"} err="failed to get container status \"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\": rpc error: code = NotFound desc = could not find container \"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\": container with ID starting with 6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.901464 4910 scope.go:117] "RemoveContainer" containerID="055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.901874 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7"} err="failed to get container status \"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\": rpc error: code = NotFound desc = could not find container \"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\": container with ID starting with 055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.901906 4910 scope.go:117] "RemoveContainer" containerID="9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.902214 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6"} err="failed to get container status \"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\": rpc error: code = NotFound desc = could not find container \"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\": container with ID starting with 
9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.902251 4910 scope.go:117] "RemoveContainer" containerID="0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.902548 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b"} err="failed to get container status \"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b\": rpc error: code = NotFound desc = could not find container \"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b\": container with ID starting with 0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.902566 4910 scope.go:117] "RemoveContainer" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.902880 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e"} err="failed to get container status \"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\": rpc error: code = NotFound desc = could not find container \"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\": container with ID starting with 4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.902906 4910 scope.go:117] "RemoveContainer" containerID="a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.903189 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e"} err="failed to get container status \"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\": rpc error: code = NotFound desc = could not find container \"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\": container with ID starting with a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.903208 4910 scope.go:117] "RemoveContainer" containerID="425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.903567 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326"} err="failed to get container status \"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\": rpc error: code = NotFound desc = could not find container \"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\": container with ID starting with 425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.903590 4910 scope.go:117] "RemoveContainer" containerID="4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.903854 4910 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544"} err="failed to get container status \"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\": rpc error: code = NotFound desc = could not find container \"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\": container with ID starting with 4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.903880 4910 scope.go:117] "RemoveContainer" containerID="c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.904189 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204"} err="failed to get container status \"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\": rpc error: code = NotFound desc = could not find container \"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\": container with ID starting with c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.904227 4910 scope.go:117] "RemoveContainer" containerID="c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.904624 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978"} err="failed to get container status \"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\": rpc error: code = NotFound desc = could not find container \"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\": container with ID starting with c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.904650 4910 scope.go:117] "RemoveContainer" containerID="6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.905039 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515"} err="failed to get container status \"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\": rpc error: code = NotFound desc = could not find container \"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\": container with ID starting with 6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.905070 4910 scope.go:117] "RemoveContainer" containerID="055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.905400 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7"} err="failed to get container status \"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\": rpc error: code = NotFound desc = could not find container \"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\": container with ID starting with 055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7 not found: ID does not exist" Nov 
25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.905427 4910 scope.go:117] "RemoveContainer" containerID="9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.905844 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6"} err="failed to get container status \"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\": rpc error: code = NotFound desc = could not find container \"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\": container with ID starting with 9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.905870 4910 scope.go:117] "RemoveContainer" containerID="0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.906208 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b"} err="failed to get container status \"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b\": rpc error: code = NotFound desc = could not find container \"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b\": container with ID starting with 0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.906229 4910 scope.go:117] "RemoveContainer" containerID="4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.906542 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e"} err="failed to get container status \"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\": rpc error: code = NotFound desc = could not find container \"4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e\": container with ID starting with 4db813d54d9336e038bf8a457c42c516cd3fbbd2e33855ee57d1ed22b847051e not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.906567 4910 scope.go:117] "RemoveContainer" containerID="a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.906896 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e"} err="failed to get container status \"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\": rpc error: code = NotFound desc = could not find container \"a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e\": container with ID starting with a2c05304e0db3cf7d2f94c882663262aae099638b281d7da1eaec2f7b8fcec5e not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.906922 4910 scope.go:117] "RemoveContainer" containerID="425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.907225 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326"} err="failed to get container status 
\"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\": rpc error: code = NotFound desc = could not find container \"425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326\": container with ID starting with 425f94d9d9510604b4a3e99066329036c6d5fd54dd214cc7e4ab9e9523451326 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.907275 4910 scope.go:117] "RemoveContainer" containerID="4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.907620 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544"} err="failed to get container status \"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\": rpc error: code = NotFound desc = could not find container \"4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544\": container with ID starting with 4316702ef266b5149262c4306fa6761e343afb4c8bfe6767d65ec87ea516a544 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.907646 4910 scope.go:117] "RemoveContainer" containerID="c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.907954 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204"} err="failed to get container status \"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\": rpc error: code = NotFound desc = could not find container \"c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204\": container with ID starting with c4a2bb53d4654ad7c58a56ff02be6e3fad64224c36dd02b30a37a2fd6fc80204 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.907983 4910 scope.go:117] "RemoveContainer" containerID="c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.908528 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978"} err="failed to get container status \"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\": rpc error: code = NotFound desc = could not find container \"c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978\": container with ID starting with c90e0db889799e6e9608b4856f5a895efff568540b1e99b979c66c88407e6978 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.908556 4910 scope.go:117] "RemoveContainer" containerID="6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.908934 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515"} err="failed to get container status \"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\": rpc error: code = NotFound desc = could not find container \"6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515\": container with ID starting with 6564bb4022007b3dba6fddd0e350d0fd243f61e9927f228b74c1e0496862e515 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.908959 4910 scope.go:117] "RemoveContainer" 
containerID="055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.909453 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7"} err="failed to get container status \"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\": rpc error: code = NotFound desc = could not find container \"055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7\": container with ID starting with 055876a78d3ab25ebe8c21f41abc06781f34f35584766944de1455325ceca2f7 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.909474 4910 scope.go:117] "RemoveContainer" containerID="9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.909866 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6"} err="failed to get container status \"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\": rpc error: code = NotFound desc = could not find container \"9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6\": container with ID starting with 9fffbd2f944b00e9b881be197a79a7914751f988531b8416ab7406bb961248c6 not found: ID does not exist" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.909889 4910 scope.go:117] "RemoveContainer" containerID="0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b" Nov 25 21:40:28 crc kubenswrapper[4910]: I1125 21:40:28.910145 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b"} err="failed to get container status \"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b\": rpc error: code = NotFound desc = could not find container \"0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b\": container with ID starting with 0aa8542b70779d1995871272cd7496cda200a6a7c8cbc291662a2642b3a9793b not found: ID does not exist" Nov 25 21:40:29 crc kubenswrapper[4910]: I1125 21:40:29.214847 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4cf48d68-85c8-45e7-8533-550e120eca12" path="/var/lib/kubelet/pods/4cf48d68-85c8-45e7-8533-550e120eca12/volumes" Nov 25 21:40:29 crc kubenswrapper[4910]: I1125 21:40:29.670602 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" event={"ID":"11d22b7b-2361-490e-813a-1e23ba18ecb9","Type":"ContainerStarted","Data":"f9995c5481b54b87b0dddcdf98ca33d2bdd7c3462edb5412cef95b4ab2bd62d2"} Nov 25 21:40:29 crc kubenswrapper[4910]: I1125 21:40:29.670671 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" event={"ID":"11d22b7b-2361-490e-813a-1e23ba18ecb9","Type":"ContainerStarted","Data":"f328f8d0f3d7e4d289c370b8e5d87f42d55240c4412f84a2c2b770ccb850aea1"} Nov 25 21:40:29 crc kubenswrapper[4910]: I1125 21:40:29.670691 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" event={"ID":"11d22b7b-2361-490e-813a-1e23ba18ecb9","Type":"ContainerStarted","Data":"c9f6db136e7fe0eb11bad4d89ddc8d9da13952f8fb998d9d6481d3b7d4085df1"} Nov 25 21:40:29 crc kubenswrapper[4910]: I1125 21:40:29.670706 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" event={"ID":"11d22b7b-2361-490e-813a-1e23ba18ecb9","Type":"ContainerStarted","Data":"7abbbd2e307d1755e09e1cb1b14fa508d6e209d88affcad7efe56d9291fe3632"} Nov 25 21:40:29 crc kubenswrapper[4910]: I1125 21:40:29.670717 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" event={"ID":"11d22b7b-2361-490e-813a-1e23ba18ecb9","Type":"ContainerStarted","Data":"58ae83f4c208ef7ed8d92953690bed555a7845d318d7d48a45035e98517b7d95"} Nov 25 21:40:29 crc kubenswrapper[4910]: I1125 21:40:29.670730 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" event={"ID":"11d22b7b-2361-490e-813a-1e23ba18ecb9","Type":"ContainerStarted","Data":"cd5be32ae69b0576272f7e6aed5d6893bd23e306b09e2f84f07e31ebdbf842bf"} Nov 25 21:40:32 crc kubenswrapper[4910]: I1125 21:40:32.694160 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" event={"ID":"11d22b7b-2361-490e-813a-1e23ba18ecb9","Type":"ContainerStarted","Data":"ceb614e41a2cc0e30f9fcee593933c01d67d5ad7f70be7a8836f41c2f2c71f39"} Nov 25 21:40:34 crc kubenswrapper[4910]: I1125 21:40:34.712478 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" event={"ID":"11d22b7b-2361-490e-813a-1e23ba18ecb9","Type":"ContainerStarted","Data":"8d2a1d142e0bb4be10b22c6bc394cf15372e7948a9747edaaa46a9384ad2bfff"} Nov 25 21:40:34 crc kubenswrapper[4910]: I1125 21:40:34.713038 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:34 crc kubenswrapper[4910]: I1125 21:40:34.713070 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:34 crc kubenswrapper[4910]: I1125 21:40:34.713091 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:34 crc kubenswrapper[4910]: I1125 21:40:34.752369 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:34 crc kubenswrapper[4910]: I1125 21:40:34.752883 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:40:34 crc kubenswrapper[4910]: I1125 21:40:34.763193 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" podStartSLOduration=6.763172567 podStartE2EDuration="6.763172567s" podCreationTimestamp="2025-11-25 21:40:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:40:34.760835353 +0000 UTC m=+590.223311685" watchObservedRunningTime="2025-11-25 21:40:34.763172567 +0000 UTC m=+590.225648899" Nov 25 21:40:43 crc kubenswrapper[4910]: I1125 21:40:43.207433 4910 scope.go:117] "RemoveContainer" containerID="ea56b080cad081ce614b04495b79e924097aaedd91ee98cd8bfb6edb241108dd" Nov 25 21:40:43 crc kubenswrapper[4910]: E1125 21:40:43.208376 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-gqjcx_openshift-multus(751fe267-dc17-4de7-81e9-a8caab9e9817)\"" pod="openshift-multus/multus-gqjcx" 
podUID="751fe267-dc17-4de7-81e9-a8caab9e9817" Nov 25 21:40:45 crc kubenswrapper[4910]: I1125 21:40:45.452969 4910 scope.go:117] "RemoveContainer" containerID="1b39c30d9198f06a3a3cc78ca7e9b031782bf2c9d50dc9801caee0d5e6b9d660" Nov 25 21:40:45 crc kubenswrapper[4910]: I1125 21:40:45.793909 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gqjcx_751fe267-dc17-4de7-81e9-a8caab9e9817/kube-multus/2.log" Nov 25 21:40:56 crc kubenswrapper[4910]: I1125 21:40:56.203764 4910 scope.go:117] "RemoveContainer" containerID="ea56b080cad081ce614b04495b79e924097aaedd91ee98cd8bfb6edb241108dd" Nov 25 21:40:56 crc kubenswrapper[4910]: I1125 21:40:56.863871 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-gqjcx_751fe267-dc17-4de7-81e9-a8caab9e9817/kube-multus/2.log" Nov 25 21:40:56 crc kubenswrapper[4910]: I1125 21:40:56.864297 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-gqjcx" event={"ID":"751fe267-dc17-4de7-81e9-a8caab9e9817","Type":"ContainerStarted","Data":"7a1c092bff6203ade8307094431e13afd8336257a720b210c3303d8095790031"} Nov 25 21:40:58 crc kubenswrapper[4910]: I1125 21:40:58.400173 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-nvbkd" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.585022 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc"] Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.589124 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.601601 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc"] Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.642388 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.643320 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s76n6\" (UniqueName: \"kubernetes.io/projected/3c3663ce-55da-4575-b39f-43df7bf5e729-kube-api-access-s76n6\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.643426 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.643573 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " 
pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.745053 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.745133 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s76n6\" (UniqueName: \"kubernetes.io/projected/3c3663ce-55da-4575-b39f-43df7bf5e729-kube-api-access-s76n6\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.745157 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.746024 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.746232 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.764990 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s76n6\" (UniqueName: \"kubernetes.io/projected/3c3663ce-55da-4575-b39f-43df7bf5e729-kube-api-access-s76n6\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:06 crc kubenswrapper[4910]: I1125 21:41:06.963354 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:07 crc kubenswrapper[4910]: I1125 21:41:07.220471 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc"] Nov 25 21:41:07 crc kubenswrapper[4910]: I1125 21:41:07.933347 4910 generic.go:334] "Generic (PLEG): container finished" podID="3c3663ce-55da-4575-b39f-43df7bf5e729" containerID="b7d71a230540e5397a888766b393c73f39379111a7bbf65f32f643a03bdcf9b6" exitCode=0 Nov 25 21:41:07 crc kubenswrapper[4910]: I1125 21:41:07.933436 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" event={"ID":"3c3663ce-55da-4575-b39f-43df7bf5e729","Type":"ContainerDied","Data":"b7d71a230540e5397a888766b393c73f39379111a7bbf65f32f643a03bdcf9b6"} Nov 25 21:41:07 crc kubenswrapper[4910]: I1125 21:41:07.936502 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" event={"ID":"3c3663ce-55da-4575-b39f-43df7bf5e729","Type":"ContainerStarted","Data":"0a81475fc4839229beab49a9ef6d3773ec12273e850ddfb22352325ee670b3a1"} Nov 25 21:41:09 crc kubenswrapper[4910]: I1125 21:41:09.959263 4910 generic.go:334] "Generic (PLEG): container finished" podID="3c3663ce-55da-4575-b39f-43df7bf5e729" containerID="4dedf834c624946de8b660522a96935c7e00642f1bae9cb970ef9e4c1d7ba324" exitCode=0 Nov 25 21:41:09 crc kubenswrapper[4910]: I1125 21:41:09.959303 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" event={"ID":"3c3663ce-55da-4575-b39f-43df7bf5e729","Type":"ContainerDied","Data":"4dedf834c624946de8b660522a96935c7e00642f1bae9cb970ef9e4c1d7ba324"} Nov 25 21:41:10 crc kubenswrapper[4910]: I1125 21:41:10.970753 4910 generic.go:334] "Generic (PLEG): container finished" podID="3c3663ce-55da-4575-b39f-43df7bf5e729" containerID="d07c42f365115f01811b9cf0e5eaa640248483730554a8d19ac4df548179b6c9" exitCode=0 Nov 25 21:41:10 crc kubenswrapper[4910]: I1125 21:41:10.970839 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" event={"ID":"3c3663ce-55da-4575-b39f-43df7bf5e729","Type":"ContainerDied","Data":"d07c42f365115f01811b9cf0e5eaa640248483730554a8d19ac4df548179b6c9"} Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.227985 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.424305 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s76n6\" (UniqueName: \"kubernetes.io/projected/3c3663ce-55da-4575-b39f-43df7bf5e729-kube-api-access-s76n6\") pod \"3c3663ce-55da-4575-b39f-43df7bf5e729\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.424391 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-bundle\") pod \"3c3663ce-55da-4575-b39f-43df7bf5e729\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.424544 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-util\") pod \"3c3663ce-55da-4575-b39f-43df7bf5e729\" (UID: \"3c3663ce-55da-4575-b39f-43df7bf5e729\") " Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.425118 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-bundle" (OuterVolumeSpecName: "bundle") pod "3c3663ce-55da-4575-b39f-43df7bf5e729" (UID: "3c3663ce-55da-4575-b39f-43df7bf5e729"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.434779 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c3663ce-55da-4575-b39f-43df7bf5e729-kube-api-access-s76n6" (OuterVolumeSpecName: "kube-api-access-s76n6") pod "3c3663ce-55da-4575-b39f-43df7bf5e729" (UID: "3c3663ce-55da-4575-b39f-43df7bf5e729"). InnerVolumeSpecName "kube-api-access-s76n6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.438343 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-util" (OuterVolumeSpecName: "util") pod "3c3663ce-55da-4575-b39f-43df7bf5e729" (UID: "3c3663ce-55da-4575-b39f-43df7bf5e729"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.525894 4910 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.525944 4910 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3c3663ce-55da-4575-b39f-43df7bf5e729-util\") on node \"crc\" DevicePath \"\"" Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.525957 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s76n6\" (UniqueName: \"kubernetes.io/projected/3c3663ce-55da-4575-b39f-43df7bf5e729-kube-api-access-s76n6\") on node \"crc\" DevicePath \"\"" Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.993835 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" event={"ID":"3c3663ce-55da-4575-b39f-43df7bf5e729","Type":"ContainerDied","Data":"0a81475fc4839229beab49a9ef6d3773ec12273e850ddfb22352325ee670b3a1"} Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.994575 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0a81475fc4839229beab49a9ef6d3773ec12273e850ddfb22352325ee670b3a1" Nov 25 21:41:12 crc kubenswrapper[4910]: I1125 21:41:12.993941 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.211099 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-bgkcs"] Nov 25 21:41:14 crc kubenswrapper[4910]: E1125 21:41:14.211357 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c3663ce-55da-4575-b39f-43df7bf5e729" containerName="pull" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.211371 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c3663ce-55da-4575-b39f-43df7bf5e729" containerName="pull" Nov 25 21:41:14 crc kubenswrapper[4910]: E1125 21:41:14.211382 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c3663ce-55da-4575-b39f-43df7bf5e729" containerName="util" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.211389 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c3663ce-55da-4575-b39f-43df7bf5e729" containerName="util" Nov 25 21:41:14 crc kubenswrapper[4910]: E1125 21:41:14.211398 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c3663ce-55da-4575-b39f-43df7bf5e729" containerName="extract" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.211404 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c3663ce-55da-4575-b39f-43df7bf5e729" containerName="extract" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.211505 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c3663ce-55da-4575-b39f-43df7bf5e729" containerName="extract" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.211897 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-bgkcs" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.214997 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.215309 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-grvxp" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.217457 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.223674 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-bgkcs"] Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.349514 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4894t\" (UniqueName: \"kubernetes.io/projected/3b2b7b01-5b19-471d-bec2-10f3182a21cd-kube-api-access-4894t\") pod \"nmstate-operator-557fdffb88-bgkcs\" (UID: \"3b2b7b01-5b19-471d-bec2-10f3182a21cd\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-bgkcs" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.450795 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4894t\" (UniqueName: \"kubernetes.io/projected/3b2b7b01-5b19-471d-bec2-10f3182a21cd-kube-api-access-4894t\") pod \"nmstate-operator-557fdffb88-bgkcs\" (UID: \"3b2b7b01-5b19-471d-bec2-10f3182a21cd\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-bgkcs" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.472762 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4894t\" (UniqueName: \"kubernetes.io/projected/3b2b7b01-5b19-471d-bec2-10f3182a21cd-kube-api-access-4894t\") pod \"nmstate-operator-557fdffb88-bgkcs\" (UID: \"3b2b7b01-5b19-471d-bec2-10f3182a21cd\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-bgkcs" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.526282 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-bgkcs" Nov 25 21:41:14 crc kubenswrapper[4910]: I1125 21:41:14.722116 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-bgkcs"] Nov 25 21:41:14 crc kubenswrapper[4910]: W1125 21:41:14.734366 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b2b7b01_5b19_471d_bec2_10f3182a21cd.slice/crio-1a1f6496308bf2416d1e346ec0c61f0d9e848ad0e45e50dccbcdbf4ffb391c00 WatchSource:0}: Error finding container 1a1f6496308bf2416d1e346ec0c61f0d9e848ad0e45e50dccbcdbf4ffb391c00: Status 404 returned error can't find the container with id 1a1f6496308bf2416d1e346ec0c61f0d9e848ad0e45e50dccbcdbf4ffb391c00 Nov 25 21:41:15 crc kubenswrapper[4910]: I1125 21:41:15.007750 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-bgkcs" event={"ID":"3b2b7b01-5b19-471d-bec2-10f3182a21cd","Type":"ContainerStarted","Data":"1a1f6496308bf2416d1e346ec0c61f0d9e848ad0e45e50dccbcdbf4ffb391c00"} Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.019663 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-bgkcs" event={"ID":"3b2b7b01-5b19-471d-bec2-10f3182a21cd","Type":"ContainerStarted","Data":"f87a85963b25ad05b97118bc06b6dd0c9d6cafadc86935c75e82e062517dffed"} Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.039384 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-bgkcs" podStartSLOduration=1.138848697 podStartE2EDuration="3.039366711s" podCreationTimestamp="2025-11-25 21:41:14 +0000 UTC" firstStartedPulling="2025-11-25 21:41:14.737443752 +0000 UTC m=+630.199920074" lastFinishedPulling="2025-11-25 21:41:16.637961766 +0000 UTC m=+632.100438088" observedRunningTime="2025-11-25 21:41:17.036322958 +0000 UTC m=+632.498799290" watchObservedRunningTime="2025-11-25 21:41:17.039366711 +0000 UTC m=+632.501843033" Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.929626 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc"] Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.930799 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc" Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.934609 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-gj8m9" Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.940755 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp"] Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.941472 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.945785 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc"] Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.947021 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.952454 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp"] Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.991766 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mx8zz\" (UniqueName: \"kubernetes.io/projected/db10171c-40c8-4bfd-88b8-c1bd80b4e37c-kube-api-access-mx8zz\") pod \"nmstate-metrics-5dcf9c57c5-m5hsc\" (UID: \"db10171c-40c8-4bfd-88b8-c1bd80b4e37c\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc" Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.991835 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/f21c62ce-5e4c-4730-afa0-9d4ef734952f-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-56dnp\" (UID: \"f21c62ce-5e4c-4730-afa0-9d4ef734952f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" Nov 25 21:41:17 crc kubenswrapper[4910]: I1125 21:41:17.991859 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwjt2\" (UniqueName: \"kubernetes.io/projected/f21c62ce-5e4c-4730-afa0-9d4ef734952f-kube-api-access-xwjt2\") pod \"nmstate-webhook-6b89b748d8-56dnp\" (UID: \"f21c62ce-5e4c-4730-afa0-9d4ef734952f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.000408 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-2hz8s"] Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.001134 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.079885 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd"] Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.080599 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.082750 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.085760 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-44fcc" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.086146 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.093197 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-ovs-socket\") pod \"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.093319 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-h5pzd\" (UID: \"97f18c66-ca4f-40ce-8b4f-b43cd7a99690\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.093362 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-dbus-socket\") pod \"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.093395 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-h5pzd\" (UID: \"97f18c66-ca4f-40ce-8b4f-b43cd7a99690\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.093439 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-nmstate-lock\") pod \"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.093468 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mx8zz\" (UniqueName: \"kubernetes.io/projected/db10171c-40c8-4bfd-88b8-c1bd80b4e37c-kube-api-access-mx8zz\") pod \"nmstate-metrics-5dcf9c57c5-m5hsc\" (UID: \"db10171c-40c8-4bfd-88b8-c1bd80b4e37c\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.093487 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znnnp\" (UniqueName: \"kubernetes.io/projected/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-kube-api-access-znnnp\") pod \"nmstate-console-plugin-5874bd7bc5-h5pzd\" (UID: \"97f18c66-ca4f-40ce-8b4f-b43cd7a99690\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:18 
crc kubenswrapper[4910]: I1125 21:41:18.093512 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xvkf\" (UniqueName: \"kubernetes.io/projected/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-kube-api-access-9xvkf\") pod \"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.093533 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/f21c62ce-5e4c-4730-afa0-9d4ef734952f-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-56dnp\" (UID: \"f21c62ce-5e4c-4730-afa0-9d4ef734952f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.093551 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwjt2\" (UniqueName: \"kubernetes.io/projected/f21c62ce-5e4c-4730-afa0-9d4ef734952f-kube-api-access-xwjt2\") pod \"nmstate-webhook-6b89b748d8-56dnp\" (UID: \"f21c62ce-5e4c-4730-afa0-9d4ef734952f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" Nov 25 21:41:18 crc kubenswrapper[4910]: E1125 21:41:18.094417 4910 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Nov 25 21:41:18 crc kubenswrapper[4910]: E1125 21:41:18.094471 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f21c62ce-5e4c-4730-afa0-9d4ef734952f-tls-key-pair podName:f21c62ce-5e4c-4730-afa0-9d4ef734952f nodeName:}" failed. No retries permitted until 2025-11-25 21:41:18.594451756 +0000 UTC m=+634.056928068 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/f21c62ce-5e4c-4730-afa0-9d4ef734952f-tls-key-pair") pod "nmstate-webhook-6b89b748d8-56dnp" (UID: "f21c62ce-5e4c-4730-afa0-9d4ef734952f") : secret "openshift-nmstate-webhook" not found Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.117975 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwjt2\" (UniqueName: \"kubernetes.io/projected/f21c62ce-5e4c-4730-afa0-9d4ef734952f-kube-api-access-xwjt2\") pod \"nmstate-webhook-6b89b748d8-56dnp\" (UID: \"f21c62ce-5e4c-4730-afa0-9d4ef734952f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.122522 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mx8zz\" (UniqueName: \"kubernetes.io/projected/db10171c-40c8-4bfd-88b8-c1bd80b4e37c-kube-api-access-mx8zz\") pod \"nmstate-metrics-5dcf9c57c5-m5hsc\" (UID: \"db10171c-40c8-4bfd-88b8-c1bd80b4e37c\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.128535 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd"] Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.194657 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-dbus-socket\") pod \"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.194955 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-h5pzd\" (UID: \"97f18c66-ca4f-40ce-8b4f-b43cd7a99690\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.195031 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-nmstate-lock\") pod \"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.195106 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znnnp\" (UniqueName: \"kubernetes.io/projected/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-kube-api-access-znnnp\") pod \"nmstate-console-plugin-5874bd7bc5-h5pzd\" (UID: \"97f18c66-ca4f-40ce-8b4f-b43cd7a99690\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.195196 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xvkf\" (UniqueName: \"kubernetes.io/projected/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-kube-api-access-9xvkf\") pod \"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.195342 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-ovs-socket\") pod 
\"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.195452 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-h5pzd\" (UID: \"97f18c66-ca4f-40ce-8b4f-b43cd7a99690\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:18 crc kubenswrapper[4910]: E1125 21:41:18.195960 4910 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.196024 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-dbus-socket\") pod \"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: E1125 21:41:18.196618 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-plugin-serving-cert podName:97f18c66-ca4f-40ce-8b4f-b43cd7a99690 nodeName:}" failed. No retries permitted until 2025-11-25 21:41:18.696580569 +0000 UTC m=+634.159056891 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-plugin-serving-cert") pod "nmstate-console-plugin-5874bd7bc5-h5pzd" (UID: "97f18c66-ca4f-40ce-8b4f-b43cd7a99690") : secret "plugin-serving-cert" not found Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.196782 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-h5pzd\" (UID: \"97f18c66-ca4f-40ce-8b4f-b43cd7a99690\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.196909 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-ovs-socket\") pod \"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.196947 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-nmstate-lock\") pod \"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.221331 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xvkf\" (UniqueName: \"kubernetes.io/projected/27c220ba-4a63-4e7f-85f5-f1aa823b41cc-kube-api-access-9xvkf\") pod \"nmstate-handler-2hz8s\" (UID: \"27c220ba-4a63-4e7f-85f5-f1aa823b41cc\") " pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.226671 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znnnp\" (UniqueName: \"kubernetes.io/projected/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-kube-api-access-znnnp\") pod 
\"nmstate-console-plugin-5874bd7bc5-h5pzd\" (UID: \"97f18c66-ca4f-40ce-8b4f-b43cd7a99690\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.281641 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-84b86b69b6-4hhbk"] Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.283175 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.301898 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.341272 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.353636 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-84b86b69b6-4hhbk"] Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.404995 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-trusted-ca-bundle\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.405073 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nbvdn\" (UniqueName: \"kubernetes.io/projected/75298d69-0f42-4e05-a78b-69d9ff3ff650-kube-api-access-nbvdn\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.405093 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-service-ca\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.405152 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/75298d69-0f42-4e05-a78b-69d9ff3ff650-console-serving-cert\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.405173 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/75298d69-0f42-4e05-a78b-69d9ff3ff650-console-oauth-config\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.405191 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-console-config\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 
crc kubenswrapper[4910]: I1125 21:41:18.405206 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-oauth-serving-cert\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: W1125 21:41:18.409277 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod27c220ba_4a63_4e7f_85f5_f1aa823b41cc.slice/crio-c762f1bea38683cb6f550c84547ae94bf57d9ec191cee309719941ab9938b6ee WatchSource:0}: Error finding container c762f1bea38683cb6f550c84547ae94bf57d9ec191cee309719941ab9938b6ee: Status 404 returned error can't find the container with id c762f1bea38683cb6f550c84547ae94bf57d9ec191cee309719941ab9938b6ee Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.506046 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-console-config\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.506484 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/75298d69-0f42-4e05-a78b-69d9ff3ff650-console-oauth-config\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.506502 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-oauth-serving-cert\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.506533 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-trusted-ca-bundle\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.506596 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-service-ca\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.506611 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nbvdn\" (UniqueName: \"kubernetes.io/projected/75298d69-0f42-4e05-a78b-69d9ff3ff650-kube-api-access-nbvdn\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.506667 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/75298d69-0f42-4e05-a78b-69d9ff3ff650-console-serving-cert\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.511801 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-console-config\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.512148 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-service-ca\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.512206 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-oauth-serving-cert\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.514107 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/75298d69-0f42-4e05-a78b-69d9ff3ff650-trusted-ca-bundle\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.515360 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/75298d69-0f42-4e05-a78b-69d9ff3ff650-console-serving-cert\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.516316 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/75298d69-0f42-4e05-a78b-69d9ff3ff650-console-oauth-config\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.528518 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nbvdn\" (UniqueName: \"kubernetes.io/projected/75298d69-0f42-4e05-a78b-69d9ff3ff650-kube-api-access-nbvdn\") pod \"console-84b86b69b6-4hhbk\" (UID: \"75298d69-0f42-4e05-a78b-69d9ff3ff650\") " pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.607432 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/f21c62ce-5e4c-4730-afa0-9d4ef734952f-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-56dnp\" (UID: \"f21c62ce-5e4c-4730-afa0-9d4ef734952f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.611015 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: 
\"kubernetes.io/secret/f21c62ce-5e4c-4730-afa0-9d4ef734952f-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-56dnp\" (UID: \"f21c62ce-5e4c-4730-afa0-9d4ef734952f\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.612480 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.620124 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc"] Nov 25 21:41:18 crc kubenswrapper[4910]: W1125 21:41:18.630376 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb10171c_40c8_4bfd_88b8_c1bd80b4e37c.slice/crio-218e7964e7b4bd609db7a317567deae0e8b8874ba7a2c16c09a83664572eecbe WatchSource:0}: Error finding container 218e7964e7b4bd609db7a317567deae0e8b8874ba7a2c16c09a83664572eecbe: Status 404 returned error can't find the container with id 218e7964e7b4bd609db7a317567deae0e8b8874ba7a2c16c09a83664572eecbe Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.657563 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.710648 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-h5pzd\" (UID: \"97f18c66-ca4f-40ce-8b4f-b43cd7a99690\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.716304 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/97f18c66-ca4f-40ce-8b4f-b43cd7a99690-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-h5pzd\" (UID: \"97f18c66-ca4f-40ce-8b4f-b43cd7a99690\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.836995 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp"] Nov 25 21:41:18 crc kubenswrapper[4910]: W1125 21:41:18.847946 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf21c62ce_5e4c_4730_afa0_9d4ef734952f.slice/crio-b7d85ae9d113dda94265b14fe127f29311523af571b606ef09bc232eead44520 WatchSource:0}: Error finding container b7d85ae9d113dda94265b14fe127f29311523af571b606ef09bc232eead44520: Status 404 returned error can't find the container with id b7d85ae9d113dda94265b14fe127f29311523af571b606ef09bc232eead44520 Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.887330 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-84b86b69b6-4hhbk"] Nov 25 21:41:18 crc kubenswrapper[4910]: I1125 21:41:18.999286 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" Nov 25 21:41:19 crc kubenswrapper[4910]: I1125 21:41:19.032599 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" event={"ID":"f21c62ce-5e4c-4730-afa0-9d4ef734952f","Type":"ContainerStarted","Data":"b7d85ae9d113dda94265b14fe127f29311523af571b606ef09bc232eead44520"} Nov 25 21:41:19 crc kubenswrapper[4910]: I1125 21:41:19.035213 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-2hz8s" event={"ID":"27c220ba-4a63-4e7f-85f5-f1aa823b41cc","Type":"ContainerStarted","Data":"c762f1bea38683cb6f550c84547ae94bf57d9ec191cee309719941ab9938b6ee"} Nov 25 21:41:19 crc kubenswrapper[4910]: I1125 21:41:19.036014 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc" event={"ID":"db10171c-40c8-4bfd-88b8-c1bd80b4e37c","Type":"ContainerStarted","Data":"218e7964e7b4bd609db7a317567deae0e8b8874ba7a2c16c09a83664572eecbe"} Nov 25 21:41:19 crc kubenswrapper[4910]: I1125 21:41:19.040495 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84b86b69b6-4hhbk" event={"ID":"75298d69-0f42-4e05-a78b-69d9ff3ff650","Type":"ContainerStarted","Data":"eb183170bc4440ad167ac536230e7dd758966f47ae68808863830bc68bc2dcff"} Nov 25 21:41:19 crc kubenswrapper[4910]: I1125 21:41:19.062765 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-84b86b69b6-4hhbk" podStartSLOduration=1.0627428270000001 podStartE2EDuration="1.062742827s" podCreationTimestamp="2025-11-25 21:41:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:41:19.060017033 +0000 UTC m=+634.522493375" watchObservedRunningTime="2025-11-25 21:41:19.062742827 +0000 UTC m=+634.525219159" Nov 25 21:41:19 crc kubenswrapper[4910]: I1125 21:41:19.438770 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd"] Nov 25 21:41:19 crc kubenswrapper[4910]: W1125 21:41:19.448619 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod97f18c66_ca4f_40ce_8b4f_b43cd7a99690.slice/crio-4dbf4948b99462733bf8786b9eced77e67ca0c39e99b319b1d051ac457c26486 WatchSource:0}: Error finding container 4dbf4948b99462733bf8786b9eced77e67ca0c39e99b319b1d051ac457c26486: Status 404 returned error can't find the container with id 4dbf4948b99462733bf8786b9eced77e67ca0c39e99b319b1d051ac457c26486 Nov 25 21:41:20 crc kubenswrapper[4910]: I1125 21:41:20.049581 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-84b86b69b6-4hhbk" event={"ID":"75298d69-0f42-4e05-a78b-69d9ff3ff650","Type":"ContainerStarted","Data":"241276f6beb1ed9ff954f79b1e393cb29bde768753f63676c581bc3d82237994"} Nov 25 21:41:20 crc kubenswrapper[4910]: I1125 21:41:20.050574 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" event={"ID":"97f18c66-ca4f-40ce-8b4f-b43cd7a99690","Type":"ContainerStarted","Data":"4dbf4948b99462733bf8786b9eced77e67ca0c39e99b319b1d051ac457c26486"} Nov 25 21:41:22 crc kubenswrapper[4910]: I1125 21:41:22.071590 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc" 
event={"ID":"db10171c-40c8-4bfd-88b8-c1bd80b4e37c","Type":"ContainerStarted","Data":"1d59cd592fe4e110de607c343c78fbe6afade15eee938eefaba1cb774c83e04c"} Nov 25 21:41:22 crc kubenswrapper[4910]: I1125 21:41:22.074669 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" event={"ID":"f21c62ce-5e4c-4730-afa0-9d4ef734952f","Type":"ContainerStarted","Data":"88e86aa871bede67f10116899b4ab3bd40854dc3907a25e4b4f1e44b251774fb"} Nov 25 21:41:22 crc kubenswrapper[4910]: I1125 21:41:22.075155 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" Nov 25 21:41:22 crc kubenswrapper[4910]: I1125 21:41:22.079795 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-2hz8s" event={"ID":"27c220ba-4a63-4e7f-85f5-f1aa823b41cc","Type":"ContainerStarted","Data":"f6fcf32c7b7f95d2873aff6ec66590a34f82625ae288b4e4c9f47086679336f1"} Nov 25 21:41:22 crc kubenswrapper[4910]: I1125 21:41:22.080739 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:22 crc kubenswrapper[4910]: I1125 21:41:22.119735 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" podStartSLOduration=2.583969912 podStartE2EDuration="5.119711629s" podCreationTimestamp="2025-11-25 21:41:17 +0000 UTC" firstStartedPulling="2025-11-25 21:41:18.850893644 +0000 UTC m=+634.313369976" lastFinishedPulling="2025-11-25 21:41:21.386635371 +0000 UTC m=+636.849111693" observedRunningTime="2025-11-25 21:41:22.098169147 +0000 UTC m=+637.560645479" watchObservedRunningTime="2025-11-25 21:41:22.119711629 +0000 UTC m=+637.582187951" Nov 25 21:41:22 crc kubenswrapper[4910]: I1125 21:41:22.126832 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-2hz8s" podStartSLOduration=2.191573244 podStartE2EDuration="5.126801783s" podCreationTimestamp="2025-11-25 21:41:17 +0000 UTC" firstStartedPulling="2025-11-25 21:41:18.422107128 +0000 UTC m=+633.884583450" lastFinishedPulling="2025-11-25 21:41:21.357335667 +0000 UTC m=+636.819811989" observedRunningTime="2025-11-25 21:41:22.125840507 +0000 UTC m=+637.588316829" watchObservedRunningTime="2025-11-25 21:41:22.126801783 +0000 UTC m=+637.589278105" Nov 25 21:41:23 crc kubenswrapper[4910]: I1125 21:41:23.089429 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" event={"ID":"97f18c66-ca4f-40ce-8b4f-b43cd7a99690","Type":"ContainerStarted","Data":"a6c5aecfc4f566d74eedbbf5bcf7ade94120dae0a043fa7e5fc869747141522f"} Nov 25 21:41:23 crc kubenswrapper[4910]: I1125 21:41:23.117212 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-h5pzd" podStartSLOduration=2.06308978 podStartE2EDuration="5.117192801s" podCreationTimestamp="2025-11-25 21:41:18 +0000 UTC" firstStartedPulling="2025-11-25 21:41:19.455691841 +0000 UTC m=+634.918168183" lastFinishedPulling="2025-11-25 21:41:22.509794882 +0000 UTC m=+637.972271204" observedRunningTime="2025-11-25 21:41:23.113558891 +0000 UTC m=+638.576035213" watchObservedRunningTime="2025-11-25 21:41:23.117192801 +0000 UTC m=+638.579669123" Nov 25 21:41:25 crc kubenswrapper[4910]: I1125 21:41:25.106028 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc" event={"ID":"db10171c-40c8-4bfd-88b8-c1bd80b4e37c","Type":"ContainerStarted","Data":"a66bc75e4c56df7d40ad0575513a0a5a9596f8bcf49c99be98cf6c2fa3f87410"} Nov 25 21:41:25 crc kubenswrapper[4910]: I1125 21:41:25.135131 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-m5hsc" podStartSLOduration=2.307988149 podStartE2EDuration="8.135103078s" podCreationTimestamp="2025-11-25 21:41:17 +0000 UTC" firstStartedPulling="2025-11-25 21:41:18.63462228 +0000 UTC m=+634.097098602" lastFinishedPulling="2025-11-25 21:41:24.461737209 +0000 UTC m=+639.924213531" observedRunningTime="2025-11-25 21:41:25.131056147 +0000 UTC m=+640.593532489" watchObservedRunningTime="2025-11-25 21:41:25.135103078 +0000 UTC m=+640.597579420" Nov 25 21:41:28 crc kubenswrapper[4910]: I1125 21:41:28.377986 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-2hz8s" Nov 25 21:41:28 crc kubenswrapper[4910]: I1125 21:41:28.657933 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:28 crc kubenswrapper[4910]: I1125 21:41:28.658035 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:28 crc kubenswrapper[4910]: I1125 21:41:28.666415 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:29 crc kubenswrapper[4910]: I1125 21:41:29.141662 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-84b86b69b6-4hhbk" Nov 25 21:41:29 crc kubenswrapper[4910]: I1125 21:41:29.216669 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-shg9w"] Nov 25 21:41:38 crc kubenswrapper[4910]: I1125 21:41:38.619427 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-56dnp" Nov 25 21:41:45 crc kubenswrapper[4910]: I1125 21:41:45.531853 4910 scope.go:117] "RemoveContainer" containerID="e8a38f416fa5b869b705845412ac538afc1107b69060a972df5b6e5dde0a10ee" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.281148 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-shg9w" podUID="fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" containerName="console" containerID="cri-o://39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e" gracePeriod=15 Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.689439 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-shg9w_fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e/console/0.log" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.690031 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-shg9w" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.810849 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-service-ca\") pod \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.810948 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-oauth-config\") pod \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.811006 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-oauth-serving-cert\") pod \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.811053 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-serving-cert\") pod \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.811128 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-config\") pod \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.811185 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-trusted-ca-bundle\") pod \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.811213 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2nphb\" (UniqueName: \"kubernetes.io/projected/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-kube-api-access-2nphb\") pod \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\" (UID: \"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e\") " Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.811830 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-service-ca" (OuterVolumeSpecName: "service-ca") pod "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" (UID: "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.811981 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" (UID: "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.812407 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-config" (OuterVolumeSpecName: "console-config") pod "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" (UID: "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.812484 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" (UID: "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.840977 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-kube-api-access-2nphb" (OuterVolumeSpecName: "kube-api-access-2nphb") pod "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" (UID: "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e"). InnerVolumeSpecName "kube-api-access-2nphb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.842992 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" (UID: "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.855532 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" (UID: "fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.912727 4910 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.912771 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2nphb\" (UniqueName: \"kubernetes.io/projected/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-kube-api-access-2nphb\") on node \"crc\" DevicePath \"\"" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.912784 4910 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-service-ca\") on node \"crc\" DevicePath \"\"" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.912793 4910 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.912806 4910 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.912814 4910 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 25 21:41:54 crc kubenswrapper[4910]: I1125 21:41:54.912824 4910 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e-console-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.065517 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l"] Nov 25 21:41:55 crc kubenswrapper[4910]: E1125 21:41:55.065751 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" containerName="console" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.065764 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" containerName="console" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.065863 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" containerName="console" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.066660 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.068600 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.077956 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l"] Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.115729 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.115951 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.116021 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqrmn\" (UniqueName: \"kubernetes.io/projected/52380560-51e8-43b1-9b6e-8036f43b20c3-kube-api-access-zqrmn\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.222198 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.222292 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqrmn\" (UniqueName: \"kubernetes.io/projected/52380560-51e8-43b1-9b6e-8036f43b20c3-kube-api-access-zqrmn\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.222392 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.222986 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.224602 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.262970 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqrmn\" (UniqueName: \"kubernetes.io/projected/52380560-51e8-43b1-9b6e-8036f43b20c3-kube-api-access-zqrmn\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.311367 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-shg9w_fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e/console/0.log" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.311428 4910 generic.go:334] "Generic (PLEG): container finished" podID="fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" containerID="39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e" exitCode=2 Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.311465 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-shg9w" event={"ID":"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e","Type":"ContainerDied","Data":"39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e"} Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.311498 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-shg9w" event={"ID":"fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e","Type":"ContainerDied","Data":"4d060d92ece7a4407da367900c7481dd35de0eb68bffa209284d4a1309ed965b"} Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.311524 4910 scope.go:117] "RemoveContainer" containerID="39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e" Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.311619 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-shg9w"
Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.331703 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-shg9w"]
Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.337509 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-shg9w"]
Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.337615 4910 scope.go:117] "RemoveContainer" containerID="39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e"
Nov 25 21:41:55 crc kubenswrapper[4910]: E1125 21:41:55.338516 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e\": container with ID starting with 39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e not found: ID does not exist" containerID="39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e"
Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.338545 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e"} err="failed to get container status \"39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e\": rpc error: code = NotFound desc = could not find container \"39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e\": container with ID starting with 39106c1f1e6e3ba0cf8608eb4827924bcb9ec356311887a96579cca3c3bd198e not found: ID does not exist"
Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.379427 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l"
Nov 25 21:41:55 crc kubenswrapper[4910]: I1125 21:41:55.653580 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l"]
Nov 25 21:41:56 crc kubenswrapper[4910]: I1125 21:41:56.321329 4910 generic.go:334] "Generic (PLEG): container finished" podID="52380560-51e8-43b1-9b6e-8036f43b20c3" containerID="99724ad18fb054cb107165ca4af38b04dd5c1feb8e29e944d3380a2d1c51e2c6" exitCode=0
Nov 25 21:41:56 crc kubenswrapper[4910]: I1125 21:41:56.321473 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" event={"ID":"52380560-51e8-43b1-9b6e-8036f43b20c3","Type":"ContainerDied","Data":"99724ad18fb054cb107165ca4af38b04dd5c1feb8e29e944d3380a2d1c51e2c6"}
Nov 25 21:41:56 crc kubenswrapper[4910]: I1125 21:41:56.322502 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" event={"ID":"52380560-51e8-43b1-9b6e-8036f43b20c3","Type":"ContainerStarted","Data":"6dc933a6739f981b16321b7ce04521598a7c0aaa8406f8923dbe7db17afcb30f"}
Nov 25 21:41:57 crc kubenswrapper[4910]: I1125 21:41:57.215180 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e" path="/var/lib/kubelet/pods/fe723e1f-cb7a-4b32-bb98-ba3787c7fa3e/volumes"
Nov 25 21:41:58 crc kubenswrapper[4910]: I1125 21:41:58.387098 4910 generic.go:334] "Generic (PLEG): container finished" podID="52380560-51e8-43b1-9b6e-8036f43b20c3" containerID="c0926d1e836d50291f7e833ece5b73033d75a0db24b9cf0e8931f2e9361b6b32" exitCode=0
Nov 25 21:41:58 crc kubenswrapper[4910]: I1125 21:41:58.387545 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" event={"ID":"52380560-51e8-43b1-9b6e-8036f43b20c3","Type":"ContainerDied","Data":"c0926d1e836d50291f7e833ece5b73033d75a0db24b9cf0e8931f2e9361b6b32"}
Nov 25 21:41:59 crc kubenswrapper[4910]: I1125 21:41:59.400425 4910 generic.go:334] "Generic (PLEG): container finished" podID="52380560-51e8-43b1-9b6e-8036f43b20c3" containerID="81670626b79aa498f52afaf26667f5b54b6d82fbc395549e37dd83404d989e42" exitCode=0
Nov 25 21:41:59 crc kubenswrapper[4910]: I1125 21:41:59.400509 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" event={"ID":"52380560-51e8-43b1-9b6e-8036f43b20c3","Type":"ContainerDied","Data":"81670626b79aa498f52afaf26667f5b54b6d82fbc395549e37dd83404d989e42"}
Nov 25 21:42:00 crc kubenswrapper[4910]: I1125 21:42:00.773794 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l"
Nov 25 21:42:00 crc kubenswrapper[4910]: I1125 21:42:00.811087 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-util\") pod \"52380560-51e8-43b1-9b6e-8036f43b20c3\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") "
Nov 25 21:42:00 crc kubenswrapper[4910]: I1125 21:42:00.811151 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-bundle\") pod \"52380560-51e8-43b1-9b6e-8036f43b20c3\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") "
Nov 25 21:42:00 crc kubenswrapper[4910]: I1125 21:42:00.811358 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqrmn\" (UniqueName: \"kubernetes.io/projected/52380560-51e8-43b1-9b6e-8036f43b20c3-kube-api-access-zqrmn\") pod \"52380560-51e8-43b1-9b6e-8036f43b20c3\" (UID: \"52380560-51e8-43b1-9b6e-8036f43b20c3\") "
Nov 25 21:42:00 crc kubenswrapper[4910]: I1125 21:42:00.815295 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-bundle" (OuterVolumeSpecName: "bundle") pod "52380560-51e8-43b1-9b6e-8036f43b20c3" (UID: "52380560-51e8-43b1-9b6e-8036f43b20c3"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:42:00 crc kubenswrapper[4910]: I1125 21:42:00.819315 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52380560-51e8-43b1-9b6e-8036f43b20c3-kube-api-access-zqrmn" (OuterVolumeSpecName: "kube-api-access-zqrmn") pod "52380560-51e8-43b1-9b6e-8036f43b20c3" (UID: "52380560-51e8-43b1-9b6e-8036f43b20c3"). InnerVolumeSpecName "kube-api-access-zqrmn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:42:00 crc kubenswrapper[4910]: I1125 21:42:00.840713 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-util" (OuterVolumeSpecName: "util") pod "52380560-51e8-43b1-9b6e-8036f43b20c3" (UID: "52380560-51e8-43b1-9b6e-8036f43b20c3"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:42:00 crc kubenswrapper[4910]: I1125 21:42:00.913624 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqrmn\" (UniqueName: \"kubernetes.io/projected/52380560-51e8-43b1-9b6e-8036f43b20c3-kube-api-access-zqrmn\") on node \"crc\" DevicePath \"\""
Nov 25 21:42:00 crc kubenswrapper[4910]: I1125 21:42:00.913687 4910 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-util\") on node \"crc\" DevicePath \"\""
Nov 25 21:42:00 crc kubenswrapper[4910]: I1125 21:42:00.913710 4910 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/52380560-51e8-43b1-9b6e-8036f43b20c3-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 21:42:01 crc kubenswrapper[4910]: I1125 21:42:01.426525 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l" event={"ID":"52380560-51e8-43b1-9b6e-8036f43b20c3","Type":"ContainerDied","Data":"6dc933a6739f981b16321b7ce04521598a7c0aaa8406f8923dbe7db17afcb30f"}
Nov 25 21:42:01 crc kubenswrapper[4910]: I1125 21:42:01.426600 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6dc933a6739f981b16321b7ce04521598a7c0aaa8406f8923dbe7db17afcb30f"
Nov 25 21:42:01 crc kubenswrapper[4910]: I1125 21:42:01.426678 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.254739 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"]
Nov 25 21:42:10 crc kubenswrapper[4910]: E1125 21:42:10.255628 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52380560-51e8-43b1-9b6e-8036f43b20c3" containerName="util"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.255648 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="52380560-51e8-43b1-9b6e-8036f43b20c3" containerName="util"
Nov 25 21:42:10 crc kubenswrapper[4910]: E1125 21:42:10.255669 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52380560-51e8-43b1-9b6e-8036f43b20c3" containerName="pull"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.255679 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="52380560-51e8-43b1-9b6e-8036f43b20c3" containerName="pull"
Nov 25 21:42:10 crc kubenswrapper[4910]: E1125 21:42:10.255701 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52380560-51e8-43b1-9b6e-8036f43b20c3" containerName="extract"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.255710 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="52380560-51e8-43b1-9b6e-8036f43b20c3" containerName="extract"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.255876 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="52380560-51e8-43b1-9b6e-8036f43b20c3" containerName="extract"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.256605 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
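The entries above are klog's structured single-line format wrapped in a journald prefix. For anyone post-processing a capture like this one, a minimal Go sketch (a hypothetical helper, not kubelet code) that re-splits a rewrapped capture on the syslog prefix and pulls the pod and payload out of the "SyncLoop (PLEG)" events; the "Nov 25"/"crc" constants are specific to this capture, and the sample IDs are abbreviated:

package main

import (
	"fmt"
	"regexp"
)

// Every entry in this capture starts with a prefix like
// "Nov 25 21:41:56 crc "; splitting on that restores one entry per line.
var entryStart = regexp.MustCompile(`Nov 25 \d{2}:\d{2}:\d{2} crc `)

// Extract pod="..." and the event={...} JSON from a PLEG event entry.
var plegEvent = regexp.MustCompile(`"SyncLoop \(PLEG\): event for pod" pod="([^"]+)" event=(\{.*?\})`)

func main() {
	joined := `Nov 25 21:41:56 crc kubenswrapper[4910]: I1125 21:41:56.321473 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae" event={"ID":"52380560","Type":"ContainerDied","Data":"99724ad1"} Nov 25 21:41:57 crc kubenswrapper[4910]: I1125 21:41:57.215180 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir"`
	// Find each entry start, then slice the text between consecutive starts.
	locs := entryStart.FindAllStringIndex(joined, -1)
	for i, loc := range locs {
		end := len(joined)
		if i+1 < len(locs) {
			end = locs[i+1][0]
		}
		entry := joined[loc[0]:end]
		if m := plegEvent.FindStringSubmatch(entry); m != nil {
			fmt.Printf("pod=%s event=%s\n", m[1], m[2])
		}
	}
}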
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.258046 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.261295 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-dwqs5"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.261678 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.261869 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.262416 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.275629 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"]
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.355928 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kns6\" (UniqueName: \"kubernetes.io/projected/6299e276-3b3c-4c65-abab-321a1129c175-kube-api-access-4kns6\") pod \"metallb-operator-controller-manager-9c5c567bb-p4k28\" (UID: \"6299e276-3b3c-4c65-abab-321a1129c175\") " pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.356456 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6299e276-3b3c-4c65-abab-321a1129c175-apiservice-cert\") pod \"metallb-operator-controller-manager-9c5c567bb-p4k28\" (UID: \"6299e276-3b3c-4c65-abab-321a1129c175\") " pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.356780 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6299e276-3b3c-4c65-abab-321a1129c175-webhook-cert\") pod \"metallb-operator-controller-manager-9c5c567bb-p4k28\" (UID: \"6299e276-3b3c-4c65-abab-321a1129c175\") " pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.458656 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6299e276-3b3c-4c65-abab-321a1129c175-webhook-cert\") pod \"metallb-operator-controller-manager-9c5c567bb-p4k28\" (UID: \"6299e276-3b3c-4c65-abab-321a1129c175\") " pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.458723 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kns6\" (UniqueName: \"kubernetes.io/projected/6299e276-3b3c-4c65-abab-321a1129c175-kube-api-access-4kns6\") pod \"metallb-operator-controller-manager-9c5c567bb-p4k28\" (UID: \"6299e276-3b3c-4c65-abab-321a1129c175\") " pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.458770 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6299e276-3b3c-4c65-abab-321a1129c175-apiservice-cert\") pod \"metallb-operator-controller-manager-9c5c567bb-p4k28\" (UID: \"6299e276-3b3c-4c65-abab-321a1129c175\") " pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.466850 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6299e276-3b3c-4c65-abab-321a1129c175-apiservice-cert\") pod \"metallb-operator-controller-manager-9c5c567bb-p4k28\" (UID: \"6299e276-3b3c-4c65-abab-321a1129c175\") " pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.473133 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6299e276-3b3c-4c65-abab-321a1129c175-webhook-cert\") pod \"metallb-operator-controller-manager-9c5c567bb-p4k28\" (UID: \"6299e276-3b3c-4c65-abab-321a1129c175\") " pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.484005 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kns6\" (UniqueName: \"kubernetes.io/projected/6299e276-3b3c-4c65-abab-321a1129c175-kube-api-access-4kns6\") pod \"metallb-operator-controller-manager-9c5c567bb-p4k28\" (UID: \"6299e276-3b3c-4c65-abab-321a1129c175\") " pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.493517 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"]
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.494508 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.496654 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-vpjcl"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.496671 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.497007 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.513761 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"]
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.560144 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7cd2774b-6d1f-4fc6-811e-a13f715832ab-webhook-cert\") pod \"metallb-operator-webhook-server-747dcffbf8-gz64s\" (UID: \"7cd2774b-6d1f-4fc6-811e-a13f715832ab\") " pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.560206 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7cd2774b-6d1f-4fc6-811e-a13f715832ab-apiservice-cert\") pod \"metallb-operator-webhook-server-747dcffbf8-gz64s\" (UID: \"7cd2774b-6d1f-4fc6-811e-a13f715832ab\") " pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.560259 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xf9nv\" (UniqueName: \"kubernetes.io/projected/7cd2774b-6d1f-4fc6-811e-a13f715832ab-kube-api-access-xf9nv\") pod \"metallb-operator-webhook-server-747dcffbf8-gz64s\" (UID: \"7cd2774b-6d1f-4fc6-811e-a13f715832ab\") " pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.576031 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.662103 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7cd2774b-6d1f-4fc6-811e-a13f715832ab-webhook-cert\") pod \"metallb-operator-webhook-server-747dcffbf8-gz64s\" (UID: \"7cd2774b-6d1f-4fc6-811e-a13f715832ab\") " pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.662153 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7cd2774b-6d1f-4fc6-811e-a13f715832ab-apiservice-cert\") pod \"metallb-operator-webhook-server-747dcffbf8-gz64s\" (UID: \"7cd2774b-6d1f-4fc6-811e-a13f715832ab\") " pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.662190 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xf9nv\" (UniqueName: \"kubernetes.io/projected/7cd2774b-6d1f-4fc6-811e-a13f715832ab-kube-api-access-xf9nv\") pod \"metallb-operator-webhook-server-747dcffbf8-gz64s\" (UID: \"7cd2774b-6d1f-4fc6-811e-a13f715832ab\") " pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.682232 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/7cd2774b-6d1f-4fc6-811e-a13f715832ab-webhook-cert\") pod \"metallb-operator-webhook-server-747dcffbf8-gz64s\" (UID: \"7cd2774b-6d1f-4fc6-811e-a13f715832ab\") " pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.685908 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/7cd2774b-6d1f-4fc6-811e-a13f715832ab-apiservice-cert\") pod \"metallb-operator-webhook-server-747dcffbf8-gz64s\" (UID: \"7cd2774b-6d1f-4fc6-811e-a13f715832ab\") " pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.716648 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xf9nv\" (UniqueName: \"kubernetes.io/projected/7cd2774b-6d1f-4fc6-811e-a13f715832ab-kube-api-access-xf9nv\") pod \"metallb-operator-webhook-server-747dcffbf8-gz64s\" (UID: \"7cd2774b-6d1f-4fc6-811e-a13f715832ab\") " pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.834604 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:10 crc kubenswrapper[4910]: I1125 21:42:10.915733 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"]
Nov 25 21:42:10 crc kubenswrapper[4910]: W1125 21:42:10.955900 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6299e276_3b3c_4c65_abab_321a1129c175.slice/crio-e89190f53a45d5d3ec9ecb7e3a24ca49884d07d2a195f7a1fff8e51afea138a5 WatchSource:0}: Error finding container e89190f53a45d5d3ec9ecb7e3a24ca49884d07d2a195f7a1fff8e51afea138a5: Status 404 returned error can't find the container with id e89190f53a45d5d3ec9ecb7e3a24ca49884d07d2a195f7a1fff8e51afea138a5
Nov 25 21:42:11 crc kubenswrapper[4910]: I1125 21:42:11.137630 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"]
Nov 25 21:42:11 crc kubenswrapper[4910]: W1125 21:42:11.142314 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7cd2774b_6d1f_4fc6_811e_a13f715832ab.slice/crio-b6d5bac851d4fd5b06b2e2c88d60d6c8293b18e2caf52dc072385caf7a8b6534 WatchSource:0}: Error finding container b6d5bac851d4fd5b06b2e2c88d60d6c8293b18e2caf52dc072385caf7a8b6534: Status 404 returned error can't find the container with id b6d5bac851d4fd5b06b2e2c88d60d6c8293b18e2caf52dc072385caf7a8b6534
Nov 25 21:42:11 crc kubenswrapper[4910]: I1125 21:42:11.492806 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s" event={"ID":"7cd2774b-6d1f-4fc6-811e-a13f715832ab","Type":"ContainerStarted","Data":"b6d5bac851d4fd5b06b2e2c88d60d6c8293b18e2caf52dc072385caf7a8b6534"}
Nov 25 21:42:11 crc kubenswrapper[4910]: I1125 21:42:11.493868 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28" event={"ID":"6299e276-3b3c-4c65-abab-321a1129c175","Type":"ContainerStarted","Data":"e89190f53a45d5d3ec9ecb7e3a24ca49884d07d2a195f7a1fff8e51afea138a5"}
Nov 25 21:42:14 crc kubenswrapper[4910]: I1125 21:42:14.523095 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28" event={"ID":"6299e276-3b3c-4c65-abab-321a1129c175","Type":"ContainerStarted","Data":"32b7c287431edcdcae00fbfa2937600fe9533e8c914e08e742e60b4a7e1d532b"}
Nov 25 21:42:14 crc kubenswrapper[4910]: I1125 21:42:14.523258 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:14 crc kubenswrapper[4910]: I1125 21:42:14.546448 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28" podStartSLOduration=1.781126947 podStartE2EDuration="4.546427654s" podCreationTimestamp="2025-11-25 21:42:10 +0000 UTC" firstStartedPulling="2025-11-25 21:42:10.962349311 +0000 UTC m=+686.424825633" lastFinishedPulling="2025-11-25 21:42:13.727650018 +0000 UTC m=+689.190126340" observedRunningTime="2025-11-25 21:42:14.544845681 +0000 UTC m=+690.007322003" watchObservedRunningTime="2025-11-25 21:42:14.546427654 +0000 UTC m=+690.008903986"
Nov 25 21:42:16 crc kubenswrapper[4910]: I1125 21:42:16.550996 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s" event={"ID":"7cd2774b-6d1f-4fc6-811e-a13f715832ab","Type":"ContainerStarted","Data":"1cc9957c256f65bb0fa01fe8b812985265887fb0a5f15737b594777cbb37ba2e"}
Nov 25 21:42:16 crc kubenswrapper[4910]: I1125 21:42:16.551425 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:16 crc kubenswrapper[4910]: I1125 21:42:16.584856 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s" podStartSLOduration=2.050027545 podStartE2EDuration="6.584836876s" podCreationTimestamp="2025-11-25 21:42:10 +0000 UTC" firstStartedPulling="2025-11-25 21:42:11.145388553 +0000 UTC m=+686.607864875" lastFinishedPulling="2025-11-25 21:42:15.680197884 +0000 UTC m=+691.142674206" observedRunningTime="2025-11-25 21:42:16.579666224 +0000 UTC m=+692.042142566" watchObservedRunningTime="2025-11-25 21:42:16.584836876 +0000 UTC m=+692.047313198"
Nov 25 21:42:23 crc kubenswrapper[4910]: I1125 21:42:23.098867 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 21:42:23 crc kubenswrapper[4910]: I1125 21:42:23.099313 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 21:42:30 crc kubenswrapper[4910]: I1125 21:42:30.839045 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-747dcffbf8-gz64s"
Nov 25 21:42:50 crc kubenswrapper[4910]: I1125 21:42:50.578202 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-9c5c567bb-p4k28"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.493196 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"]
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.494172 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.497635 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-4btp2"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.497929 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.513346 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-qw4hv"]
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.516660 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.519472 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.521497 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"]
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.532159 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.572993 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5f6618c7-ba0f-45ce-a1f1-d42f55e72500-cert\") pod \"frr-k8s-webhook-server-6998585d5-csqvk\" (UID: \"5f6618c7-ba0f-45ce-a1f1-d42f55e72500\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.573046 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9d4be312-9921-4b2e-8456-acd2b0f012de-frr-startup\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.573068 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-frr-sockets\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.573167 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5g75\" (UniqueName: \"kubernetes.io/projected/5f6618c7-ba0f-45ce-a1f1-d42f55e72500-kube-api-access-f5g75\") pod \"frr-k8s-webhook-server-6998585d5-csqvk\" (UID: \"5f6618c7-ba0f-45ce-a1f1-d42f55e72500\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.573222 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-metrics\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.573305 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljfx9\" (UniqueName: \"kubernetes.io/projected/9d4be312-9921-4b2e-8456-acd2b0f012de-kube-api-access-ljfx9\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.573370 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-reloader\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.573391 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4be312-9921-4b2e-8456-acd2b0f012de-metrics-certs\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.573420 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-frr-conf\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.582902 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-fmrxg"]
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.583874 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.585994 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.585995 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.586137 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.586837 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-xpc6t"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.596882 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-57bhp"]
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.597860 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.601927 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.618256 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-57bhp"]
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.674678 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-reloader\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.674721 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4be312-9921-4b2e-8456-acd2b0f012de-metrics-certs\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.674744 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-frr-conf\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.674772 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5f6618c7-ba0f-45ce-a1f1-d42f55e72500-cert\") pod \"frr-k8s-webhook-server-6998585d5-csqvk\" (UID: \"5f6618c7-ba0f-45ce-a1f1-d42f55e72500\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"
Nov 25 21:42:51 crc kubenswrapper[4910]: E1125 21:42:51.674855 4910 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found
Nov 25 21:42:51 crc kubenswrapper[4910]: E1125 21:42:51.674911 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5f6618c7-ba0f-45ce-a1f1-d42f55e72500-cert podName:5f6618c7-ba0f-45ce-a1f1-d42f55e72500 nodeName:}" failed. No retries permitted until 2025-11-25 21:42:52.174893407 +0000 UTC m=+727.637369719 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/5f6618c7-ba0f-45ce-a1f1-d42f55e72500-cert") pod "frr-k8s-webhook-server-6998585d5-csqvk" (UID: "5f6618c7-ba0f-45ce-a1f1-d42f55e72500") : secret "frr-k8s-webhook-server-cert" not found
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.674974 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9d4be312-9921-4b2e-8456-acd2b0f012de-frr-startup\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.675022 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-frr-sockets\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.675048 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5g75\" (UniqueName: \"kubernetes.io/projected/5f6618c7-ba0f-45ce-a1f1-d42f55e72500-kube-api-access-f5g75\") pod \"frr-k8s-webhook-server-6998585d5-csqvk\" (UID: \"5f6618c7-ba0f-45ce-a1f1-d42f55e72500\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.675072 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-metrics\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.675099 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljfx9\" (UniqueName: \"kubernetes.io/projected/9d4be312-9921-4b2e-8456-acd2b0f012de-kube-api-access-ljfx9\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.675292 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-frr-conf\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.675378 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-reloader\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.675462 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-frr-sockets\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.675523 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/9d4be312-9921-4b2e-8456-acd2b0f012de-metrics\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
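The nestedpendingoperations errors above schedule a retry rather than failing the pod: the first miss on "frr-k8s-webhook-server-cert" backs off 500ms, and the "metallb-memberlist" misses in the following entries show the delay doubling to 1s on the second attempt. A minimal Go sketch of that doubling backoff, under stated assumptions (the cap value and attempt limit are illustrative, not taken from this log):

package main

import (
	"errors"
	"fmt"
	"time"
)

// mountWithBackoff retries op with a doubling delay, mirroring the
// "durationBeforeRetry 500ms" then "durationBeforeRetry 1s" progression
// visible in the entries here. maxDelay is an assumed cap.
func mountWithBackoff(op func() error, initial, maxDelay time.Duration, attempts int) error {
	delay := initial
	for i := 0; i < attempts; i++ {
		if err := op(); err == nil {
			return nil
		}
		fmt.Printf("attempt %d failed; no retries permitted for %s\n", i+1, delay)
		time.Sleep(delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
	return errors.New("giving up")
}

func main() {
	// The secret shows up after two failures, as metallb-memberlist does below.
	missing := 2
	err := mountWithBackoff(func() error {
		if missing > 0 {
			missing--
			return errors.New(`secret "metallb-memberlist" not found`)
		}
		return nil
	}, 500*time.Millisecond, 2*time.Minute, 5)
	fmt.Println("result:", err)
}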
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.675972 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/9d4be312-9921-4b2e-8456-acd2b0f012de-frr-startup\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.680709 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9d4be312-9921-4b2e-8456-acd2b0f012de-metrics-certs\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.691530 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5g75\" (UniqueName: \"kubernetes.io/projected/5f6618c7-ba0f-45ce-a1f1-d42f55e72500-kube-api-access-f5g75\") pod \"frr-k8s-webhook-server-6998585d5-csqvk\" (UID: \"5f6618c7-ba0f-45ce-a1f1-d42f55e72500\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.700302 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljfx9\" (UniqueName: \"kubernetes.io/projected/9d4be312-9921-4b2e-8456-acd2b0f012de-kube-api-access-ljfx9\") pod \"frr-k8s-qw4hv\" (UID: \"9d4be312-9921-4b2e-8456-acd2b0f012de\") " pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.777003 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8517a279-1eca-4be6-a4c0-09716207a094-cert\") pod \"controller-6c7b4b5f48-57bhp\" (UID: \"8517a279-1eca-4be6-a4c0-09716207a094\") " pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.777627 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-metrics-certs\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.777869 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-memberlist\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.778125 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4xf9\" (UniqueName: \"kubernetes.io/projected/6dcf2928-8050-4c63-9035-35b85bb922ce-kube-api-access-h4xf9\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.778390 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/6dcf2928-8050-4c63-9035-35b85bb922ce-metallb-excludel2\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.778425 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8517a279-1eca-4be6-a4c0-09716207a094-metrics-certs\") pod \"controller-6c7b4b5f48-57bhp\" (UID: \"8517a279-1eca-4be6-a4c0-09716207a094\") " pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.778463 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ncplv\" (UniqueName: \"kubernetes.io/projected/8517a279-1eca-4be6-a4c0-09716207a094-kube-api-access-ncplv\") pod \"controller-6c7b4b5f48-57bhp\" (UID: \"8517a279-1eca-4be6-a4c0-09716207a094\") " pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.833218 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-qw4hv"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.880431 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-metrics-certs\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.880543 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-memberlist\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.880603 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4xf9\" (UniqueName: \"kubernetes.io/projected/6dcf2928-8050-4c63-9035-35b85bb922ce-kube-api-access-h4xf9\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.880743 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/6dcf2928-8050-4c63-9035-35b85bb922ce-metallb-excludel2\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.880782 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8517a279-1eca-4be6-a4c0-09716207a094-metrics-certs\") pod \"controller-6c7b4b5f48-57bhp\" (UID: \"8517a279-1eca-4be6-a4c0-09716207a094\") " pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.880853 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ncplv\" (UniqueName: \"kubernetes.io/projected/8517a279-1eca-4be6-a4c0-09716207a094-kube-api-access-ncplv\") pod \"controller-6c7b4b5f48-57bhp\" (UID: \"8517a279-1eca-4be6-a4c0-09716207a094\") " pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.880929 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8517a279-1eca-4be6-a4c0-09716207a094-cert\") pod \"controller-6c7b4b5f48-57bhp\" (UID: \"8517a279-1eca-4be6-a4c0-09716207a094\") " pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:51 crc kubenswrapper[4910]: E1125 21:42:51.880747 4910 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 25 21:42:51 crc kubenswrapper[4910]: E1125 21:42:51.881067 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-memberlist podName:6dcf2928-8050-4c63-9035-35b85bb922ce nodeName:}" failed. No retries permitted until 2025-11-25 21:42:52.381043992 +0000 UTC m=+727.843520314 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-memberlist") pod "speaker-fmrxg" (UID: "6dcf2928-8050-4c63-9035-35b85bb922ce") : secret "metallb-memberlist" not found
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.881756 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/6dcf2928-8050-4c63-9035-35b85bb922ce-metallb-excludel2\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.883565 4910 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.884927 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-metrics-certs\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.885065 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8517a279-1eca-4be6-a4c0-09716207a094-metrics-certs\") pod \"controller-6c7b4b5f48-57bhp\" (UID: \"8517a279-1eca-4be6-a4c0-09716207a094\") " pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.895630 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8517a279-1eca-4be6-a4c0-09716207a094-cert\") pod \"controller-6c7b4b5f48-57bhp\" (UID: \"8517a279-1eca-4be6-a4c0-09716207a094\") " pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.901750 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ncplv\" (UniqueName: \"kubernetes.io/projected/8517a279-1eca-4be6-a4c0-09716207a094-kube-api-access-ncplv\") pod \"controller-6c7b4b5f48-57bhp\" (UID: \"8517a279-1eca-4be6-a4c0-09716207a094\") " pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.909179 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4xf9\" (UniqueName: \"kubernetes.io/projected/6dcf2928-8050-4c63-9035-35b85bb922ce-kube-api-access-h4xf9\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:51 crc kubenswrapper[4910]: I1125 21:42:51.915729 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.159291 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-57bhp"]
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.187110 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5f6618c7-ba0f-45ce-a1f1-d42f55e72500-cert\") pod \"frr-k8s-webhook-server-6998585d5-csqvk\" (UID: \"5f6618c7-ba0f-45ce-a1f1-d42f55e72500\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.193202 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/5f6618c7-ba0f-45ce-a1f1-d42f55e72500-cert\") pod \"frr-k8s-webhook-server-6998585d5-csqvk\" (UID: \"5f6618c7-ba0f-45ce-a1f1-d42f55e72500\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.389823 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-memberlist\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:52 crc kubenswrapper[4910]: E1125 21:42:52.389930 4910 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found
Nov 25 21:42:52 crc kubenswrapper[4910]: E1125 21:42:52.389990 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-memberlist podName:6dcf2928-8050-4c63-9035-35b85bb922ce nodeName:}" failed. No retries permitted until 2025-11-25 21:42:53.389974562 +0000 UTC m=+728.852450884 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-memberlist") pod "speaker-fmrxg" (UID: "6dcf2928-8050-4c63-9035-35b85bb922ce") : secret "metallb-memberlist" not found
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.413851 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.613746 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"]
Nov 25 21:42:52 crc kubenswrapper[4910]: W1125 21:42:52.615115 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5f6618c7_ba0f_45ce_a1f1_d42f55e72500.slice/crio-e5e208cf95eb87c2c8ed691e8d1eb547e1f171a079e590629c0f00f5d1e6fb56 WatchSource:0}: Error finding container e5e208cf95eb87c2c8ed691e8d1eb547e1f171a079e590629c0f00f5d1e6fb56: Status 404 returned error can't find the container with id e5e208cf95eb87c2c8ed691e8d1eb547e1f171a079e590629c0f00f5d1e6fb56
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.783823 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-57bhp" event={"ID":"8517a279-1eca-4be6-a4c0-09716207a094","Type":"ContainerStarted","Data":"aa358f7d427109d58b4586e3a1e10174b759446cc9237af92ca52965e9ec2072"}
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.783893 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-57bhp" event={"ID":"8517a279-1eca-4be6-a4c0-09716207a094","Type":"ContainerStarted","Data":"3021e28397c367f28988a3673b2d5addafe74b573ae0f92aaa2954685a04ca2e"}
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.783954 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-57bhp" event={"ID":"8517a279-1eca-4be6-a4c0-09716207a094","Type":"ContainerStarted","Data":"0e45d94e4f9cb91a039294e916885fdaa650cb5b01fe97e1da27a242fbfeba98"}
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.783981 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-57bhp"
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.784763 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qw4hv" event={"ID":"9d4be312-9921-4b2e-8456-acd2b0f012de","Type":"ContainerStarted","Data":"c12d70a10d62b5df03eb159b31a3fa7b9aaff185b4288f58b01eb2a5e235e212"}
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.785649 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk" event={"ID":"5f6618c7-ba0f-45ce-a1f1-d42f55e72500","Type":"ContainerStarted","Data":"e5e208cf95eb87c2c8ed691e8d1eb547e1f171a079e590629c0f00f5d1e6fb56"}
Nov 25 21:42:52 crc kubenswrapper[4910]: I1125 21:42:52.803190 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-57bhp" podStartSLOduration=1.803154257 podStartE2EDuration="1.803154257s" podCreationTimestamp="2025-11-25 21:42:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:42:52.798312896 +0000 UTC m=+728.260789268" watchObservedRunningTime="2025-11-25 21:42:52.803154257 +0000 UTC m=+728.265630609"
Nov 25 21:42:53 crc kubenswrapper[4910]: I1125 21:42:53.099511 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 21:42:53 crc kubenswrapper[4910]: I1125 21:42:53.099598 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 21:42:53 crc kubenswrapper[4910]: I1125 21:42:53.409225 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-memberlist\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:53 crc kubenswrapper[4910]: I1125 21:42:53.415190 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/6dcf2928-8050-4c63-9035-35b85bb922ce-memberlist\") pod \"speaker-fmrxg\" (UID: \"6dcf2928-8050-4c63-9035-35b85bb922ce\") " pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:53 crc kubenswrapper[4910]: I1125 21:42:53.699485 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:53 crc kubenswrapper[4910]: W1125 21:42:53.720356 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6dcf2928_8050_4c63_9035_35b85bb922ce.slice/crio-481307dd5ce674fb61248f9b0cc467117d7d2ba0e218a241539403ff525c1e57 WatchSource:0}: Error finding container 481307dd5ce674fb61248f9b0cc467117d7d2ba0e218a241539403ff525c1e57: Status 404 returned error can't find the container with id 481307dd5ce674fb61248f9b0cc467117d7d2ba0e218a241539403ff525c1e57
Nov 25 21:42:53 crc kubenswrapper[4910]: I1125 21:42:53.795810 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-fmrxg" event={"ID":"6dcf2928-8050-4c63-9035-35b85bb922ce","Type":"ContainerStarted","Data":"481307dd5ce674fb61248f9b0cc467117d7d2ba0e218a241539403ff525c1e57"}
Nov 25 21:42:54 crc kubenswrapper[4910]: I1125 21:42:54.807510 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-fmrxg" event={"ID":"6dcf2928-8050-4c63-9035-35b85bb922ce","Type":"ContainerStarted","Data":"8dc4dddb0b644abe2dd8936455bac910fbf94f58886b5ed74b048636f46ca4b5"}
Nov 25 21:42:54 crc kubenswrapper[4910]: I1125 21:42:54.807905 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-fmrxg"
Nov 25 21:42:54 crc kubenswrapper[4910]: I1125 21:42:54.807921 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-fmrxg" event={"ID":"6dcf2928-8050-4c63-9035-35b85bb922ce","Type":"ContainerStarted","Data":"d68fe263f50f6bfcfd16845654b654869e40ce14b242938fe63e79116c8dbe2b"}
Nov 25 21:42:54 crc kubenswrapper[4910]: I1125 21:42:54.826985 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-fmrxg" podStartSLOduration=3.826961867 podStartE2EDuration="3.826961867s" podCreationTimestamp="2025-11-25 21:42:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:42:54.826072753 +0000 UTC m=+730.288549075" watchObservedRunningTime="2025-11-25 21:42:54.826961867 +0000 UTC m=+730.289438189"
Nov 25 21:42:59 crc kubenswrapper[4910]: I1125 21:42:59.843724 4910 generic.go:334] "Generic (PLEG): container finished" podID="9d4be312-9921-4b2e-8456-acd2b0f012de" containerID="88be29b85406b9a85f18c0c55645c45839ae190592f8f9efdd59641e59a7629a" exitCode=0
Nov 25 21:42:59 crc kubenswrapper[4910]: I1125 21:42:59.843768 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qw4hv" event={"ID":"9d4be312-9921-4b2e-8456-acd2b0f012de","Type":"ContainerDied","Data":"88be29b85406b9a85f18c0c55645c45839ae190592f8f9efdd59641e59a7629a"}
Nov 25 21:42:59 crc kubenswrapper[4910]: I1125 21:42:59.845936 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk" event={"ID":"5f6618c7-ba0f-45ce-a1f1-d42f55e72500","Type":"ContainerStarted","Data":"963241906c60b0bd1aefd15972d9afc104cbb7224e660a560317ece089de1eb7"}
Nov 25 21:42:59 crc kubenswrapper[4910]: I1125 21:42:59.846089 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk"
Nov 25 21:42:59 crc kubenswrapper[4910]: I1125 21:42:59.886236 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk" podStartSLOduration=2.5281511290000003 podStartE2EDuration="8.886217799s" podCreationTimestamp="2025-11-25 21:42:51 +0000 UTC" firstStartedPulling="2025-11-25 21:42:52.617302607 +0000 UTC m=+728.079778929" lastFinishedPulling="2025-11-25 21:42:58.975369277 +0000 UTC m=+734.437845599" observedRunningTime="2025-11-25 21:42:59.88478507 +0000 UTC m=+735.347261392" watchObservedRunningTime="2025-11-25 21:42:59.886217799 +0000 UTC m=+735.348694121"
Nov 25 21:43:00 crc kubenswrapper[4910]: I1125 21:43:00.884818 4910 generic.go:334] "Generic (PLEG): container finished" podID="9d4be312-9921-4b2e-8456-acd2b0f012de" containerID="b500436f7bb009cee0c0f05bba1069629a4e5f783fb06a2eef4aa9573d3d6d2d" exitCode=0
Nov 25 21:43:00 crc kubenswrapper[4910]: I1125 21:43:00.886856 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qw4hv" event={"ID":"9d4be312-9921-4b2e-8456-acd2b0f012de","Type":"ContainerDied","Data":"b500436f7bb009cee0c0f05bba1069629a4e5f783fb06a2eef4aa9573d3d6d2d"}
Nov 25 21:43:01 crc kubenswrapper[4910]: I1125 21:43:01.894603 4910 generic.go:334] "Generic (PLEG): container finished" podID="9d4be312-9921-4b2e-8456-acd2b0f012de" containerID="a5ca55e369c2a3fa9e119f5823ce5c9c8b1a6fd6938708938dc17b9aa94c06fe" exitCode=0
Nov 25 21:43:01 crc kubenswrapper[4910]: I1125 21:43:01.894979 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qw4hv" event={"ID":"9d4be312-9921-4b2e-8456-acd2b0f012de","Type":"ContainerDied","Data":"a5ca55e369c2a3fa9e119f5823ce5c9c8b1a6fd6938708938dc17b9aa94c06fe"}
Nov 25 21:43:02 crc kubenswrapper[4910]: I1125 21:43:02.904992 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qw4hv" event={"ID":"9d4be312-9921-4b2e-8456-acd2b0f012de","Type":"ContainerStarted","Data":"0cc2def34b9878d971b861095447ad1e89e4c57435fdb938d87b5e3e5dc0f27a"}
Nov 25 21:43:02 crc kubenswrapper[4910]: I1125 21:43:02.905065 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qw4hv" event={"ID":"9d4be312-9921-4b2e-8456-acd2b0f012de","Type":"ContainerStarted","Data":"a7c01d3775e1c8e19670e957e67c43dc46b9698b983090dc13174ac47ed8e43d"}
event={"ID":"9d4be312-9921-4b2e-8456-acd2b0f012de","Type":"ContainerStarted","Data":"ec8f4263099c2e9042542b67c94fa035a2bc38709bec237527d1fee0c6c7697a"} Nov 25 21:43:02 crc kubenswrapper[4910]: I1125 21:43:02.905094 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qw4hv" event={"ID":"9d4be312-9921-4b2e-8456-acd2b0f012de","Type":"ContainerStarted","Data":"97e2c1d7c6715514f8c834ffe035dee5d22d98f2b57414b3b3a6173cfa744668"} Nov 25 21:43:02 crc kubenswrapper[4910]: I1125 21:43:02.905106 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qw4hv" event={"ID":"9d4be312-9921-4b2e-8456-acd2b0f012de","Type":"ContainerStarted","Data":"4d0054ce7162a1d9b45babebe9685344c47487a4d27d130293adcddc233b12b0"} Nov 25 21:43:02 crc kubenswrapper[4910]: I1125 21:43:02.905117 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-qw4hv" event={"ID":"9d4be312-9921-4b2e-8456-acd2b0f012de","Type":"ContainerStarted","Data":"ba399a313ac4476913853ed1092fc90fa307d373f467cf20810a1cad39c53893"} Nov 25 21:43:02 crc kubenswrapper[4910]: I1125 21:43:02.905153 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-qw4hv" Nov 25 21:43:02 crc kubenswrapper[4910]: I1125 21:43:02.928367 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-qw4hv" podStartSLOduration=4.921467077 podStartE2EDuration="11.92834838s" podCreationTimestamp="2025-11-25 21:42:51 +0000 UTC" firstStartedPulling="2025-11-25 21:42:51.96126319 +0000 UTC m=+727.423739512" lastFinishedPulling="2025-11-25 21:42:58.968144493 +0000 UTC m=+734.430620815" observedRunningTime="2025-11-25 21:43:02.925141724 +0000 UTC m=+738.387618046" watchObservedRunningTime="2025-11-25 21:43:02.92834838 +0000 UTC m=+738.390824702" Nov 25 21:43:03 crc kubenswrapper[4910]: I1125 21:43:03.704583 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-fmrxg" Nov 25 21:43:06 crc kubenswrapper[4910]: I1125 21:43:06.833072 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-99pj5"] Nov 25 21:43:06 crc kubenswrapper[4910]: I1125 21:43:06.834154 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-qw4hv" Nov 25 21:43:06 crc kubenswrapper[4910]: I1125 21:43:06.834530 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-99pj5" Nov 25 21:43:06 crc kubenswrapper[4910]: I1125 21:43:06.836906 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 25 21:43:06 crc kubenswrapper[4910]: I1125 21:43:06.838214 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-ggrcp" Nov 25 21:43:06 crc kubenswrapper[4910]: I1125 21:43:06.839463 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 25 21:43:06 crc kubenswrapper[4910]: I1125 21:43:06.847878 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-99pj5"] Nov 25 21:43:06 crc kubenswrapper[4910]: I1125 21:43:06.893771 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-qw4hv" Nov 25 21:43:07 crc kubenswrapper[4910]: I1125 21:43:07.023428 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x8lbk\" (UniqueName: \"kubernetes.io/projected/39fb1b39-b714-4dbc-83f0-8ee115cc74a5-kube-api-access-x8lbk\") pod \"openstack-operator-index-99pj5\" (UID: \"39fb1b39-b714-4dbc-83f0-8ee115cc74a5\") " pod="openstack-operators/openstack-operator-index-99pj5" Nov 25 21:43:07 crc kubenswrapper[4910]: I1125 21:43:07.124779 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x8lbk\" (UniqueName: \"kubernetes.io/projected/39fb1b39-b714-4dbc-83f0-8ee115cc74a5-kube-api-access-x8lbk\") pod \"openstack-operator-index-99pj5\" (UID: \"39fb1b39-b714-4dbc-83f0-8ee115cc74a5\") " pod="openstack-operators/openstack-operator-index-99pj5" Nov 25 21:43:07 crc kubenswrapper[4910]: I1125 21:43:07.142588 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x8lbk\" (UniqueName: \"kubernetes.io/projected/39fb1b39-b714-4dbc-83f0-8ee115cc74a5-kube-api-access-x8lbk\") pod \"openstack-operator-index-99pj5\" (UID: \"39fb1b39-b714-4dbc-83f0-8ee115cc74a5\") " pod="openstack-operators/openstack-operator-index-99pj5" Nov 25 21:43:07 crc kubenswrapper[4910]: I1125 21:43:07.152382 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-99pj5" Nov 25 21:43:07 crc kubenswrapper[4910]: I1125 21:43:07.364110 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-99pj5"] Nov 25 21:43:07 crc kubenswrapper[4910]: W1125 21:43:07.374415 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39fb1b39_b714_4dbc_83f0_8ee115cc74a5.slice/crio-c3bf2e0cd05a4e847169aa08fc25561df3247b284796a340f31160d32c37639f WatchSource:0}: Error finding container c3bf2e0cd05a4e847169aa08fc25561df3247b284796a340f31160d32c37639f: Status 404 returned error can't find the container with id c3bf2e0cd05a4e847169aa08fc25561df3247b284796a340f31160d32c37639f Nov 25 21:43:07 crc kubenswrapper[4910]: I1125 21:43:07.951407 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-99pj5" event={"ID":"39fb1b39-b714-4dbc-83f0-8ee115cc74a5","Type":"ContainerStarted","Data":"c3bf2e0cd05a4e847169aa08fc25561df3247b284796a340f31160d32c37639f"} Nov 25 21:43:09 crc kubenswrapper[4910]: I1125 21:43:09.964105 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-99pj5" event={"ID":"39fb1b39-b714-4dbc-83f0-8ee115cc74a5","Type":"ContainerStarted","Data":"8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c"} Nov 25 21:43:09 crc kubenswrapper[4910]: I1125 21:43:09.980360 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-99pj5" podStartSLOduration=1.614066125 podStartE2EDuration="3.980338117s" podCreationTimestamp="2025-11-25 21:43:06 +0000 UTC" firstStartedPulling="2025-11-25 21:43:07.379909476 +0000 UTC m=+742.842385798" lastFinishedPulling="2025-11-25 21:43:09.746181478 +0000 UTC m=+745.208657790" observedRunningTime="2025-11-25 21:43:09.97893346 +0000 UTC m=+745.441409782" watchObservedRunningTime="2025-11-25 21:43:09.980338117 +0000 UTC m=+745.442814479" Nov 25 21:43:10 crc kubenswrapper[4910]: I1125 21:43:10.627704 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-99pj5"] Nov 25 21:43:11 crc kubenswrapper[4910]: I1125 21:43:11.231953 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-7lqkm"] Nov 25 21:43:11 crc kubenswrapper[4910]: I1125 21:43:11.232704 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-7lqkm" Nov 25 21:43:11 crc kubenswrapper[4910]: I1125 21:43:11.248557 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-7lqkm"] Nov 25 21:43:11 crc kubenswrapper[4910]: I1125 21:43:11.383832 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x6wrt\" (UniqueName: \"kubernetes.io/projected/a4af1c8b-9a29-47cc-aec2-501fe04e24fd-kube-api-access-x6wrt\") pod \"openstack-operator-index-7lqkm\" (UID: \"a4af1c8b-9a29-47cc-aec2-501fe04e24fd\") " pod="openstack-operators/openstack-operator-index-7lqkm" Nov 25 21:43:11 crc kubenswrapper[4910]: I1125 21:43:11.484910 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x6wrt\" (UniqueName: \"kubernetes.io/projected/a4af1c8b-9a29-47cc-aec2-501fe04e24fd-kube-api-access-x6wrt\") pod \"openstack-operator-index-7lqkm\" (UID: \"a4af1c8b-9a29-47cc-aec2-501fe04e24fd\") " pod="openstack-operators/openstack-operator-index-7lqkm" Nov 25 21:43:11 crc kubenswrapper[4910]: I1125 21:43:11.518023 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x6wrt\" (UniqueName: \"kubernetes.io/projected/a4af1c8b-9a29-47cc-aec2-501fe04e24fd-kube-api-access-x6wrt\") pod \"openstack-operator-index-7lqkm\" (UID: \"a4af1c8b-9a29-47cc-aec2-501fe04e24fd\") " pod="openstack-operators/openstack-operator-index-7lqkm" Nov 25 21:43:11 crc kubenswrapper[4910]: I1125 21:43:11.554761 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-7lqkm" Nov 25 21:43:11 crc kubenswrapper[4910]: I1125 21:43:11.751879 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-7lqkm"] Nov 25 21:43:11 crc kubenswrapper[4910]: I1125 21:43:11.920514 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-57bhp" Nov 25 21:43:11 crc kubenswrapper[4910]: I1125 21:43:11.977702 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7lqkm" event={"ID":"a4af1c8b-9a29-47cc-aec2-501fe04e24fd","Type":"ContainerStarted","Data":"89f65571a1dccd2b7cd9215b9150e31d57e34b0f125202e1bcc47e5ffdeb7d38"} Nov 25 21:43:11 crc kubenswrapper[4910]: I1125 21:43:11.977862 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-99pj5" podUID="39fb1b39-b714-4dbc-83f0-8ee115cc74a5" containerName="registry-server" containerID="cri-o://8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c" gracePeriod=2 Nov 25 21:43:12 crc kubenswrapper[4910]: I1125 21:43:12.324408 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-99pj5" Nov 25 21:43:12 crc kubenswrapper[4910]: I1125 21:43:12.407835 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x8lbk\" (UniqueName: \"kubernetes.io/projected/39fb1b39-b714-4dbc-83f0-8ee115cc74a5-kube-api-access-x8lbk\") pod \"39fb1b39-b714-4dbc-83f0-8ee115cc74a5\" (UID: \"39fb1b39-b714-4dbc-83f0-8ee115cc74a5\") " Nov 25 21:43:12 crc kubenswrapper[4910]: I1125 21:43:12.413766 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/39fb1b39-b714-4dbc-83f0-8ee115cc74a5-kube-api-access-x8lbk" (OuterVolumeSpecName: "kube-api-access-x8lbk") pod "39fb1b39-b714-4dbc-83f0-8ee115cc74a5" (UID: "39fb1b39-b714-4dbc-83f0-8ee115cc74a5"). InnerVolumeSpecName "kube-api-access-x8lbk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:43:12 crc kubenswrapper[4910]: I1125 21:43:12.427056 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-csqvk" Nov 25 21:43:12 crc kubenswrapper[4910]: I1125 21:43:12.514455 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x8lbk\" (UniqueName: \"kubernetes.io/projected/39fb1b39-b714-4dbc-83f0-8ee115cc74a5-kube-api-access-x8lbk\") on node \"crc\" DevicePath \"\"" Nov 25 21:43:12 crc kubenswrapper[4910]: I1125 21:43:12.985626 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-7lqkm" event={"ID":"a4af1c8b-9a29-47cc-aec2-501fe04e24fd","Type":"ContainerStarted","Data":"6d7730d2da750632e471ccdb4f39418ecd685c8c85433b88859a3a9988268b5d"} Nov 25 21:43:12 crc kubenswrapper[4910]: I1125 21:43:12.987045 4910 generic.go:334] "Generic (PLEG): container finished" podID="39fb1b39-b714-4dbc-83f0-8ee115cc74a5" containerID="8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c" exitCode=0 Nov 25 21:43:12 crc kubenswrapper[4910]: I1125 21:43:12.987108 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-99pj5" event={"ID":"39fb1b39-b714-4dbc-83f0-8ee115cc74a5","Type":"ContainerDied","Data":"8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c"} Nov 25 21:43:12 crc kubenswrapper[4910]: I1125 21:43:12.987171 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-99pj5" Nov 25 21:43:12 crc kubenswrapper[4910]: I1125 21:43:12.987293 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-99pj5" event={"ID":"39fb1b39-b714-4dbc-83f0-8ee115cc74a5","Type":"ContainerDied","Data":"c3bf2e0cd05a4e847169aa08fc25561df3247b284796a340f31160d32c37639f"} Nov 25 21:43:12 crc kubenswrapper[4910]: I1125 21:43:12.987397 4910 scope.go:117] "RemoveContainer" containerID="8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c" Nov 25 21:43:13 crc kubenswrapper[4910]: I1125 21:43:13.001015 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-7lqkm" podStartSLOduration=1.678013065 podStartE2EDuration="2.000999713s" podCreationTimestamp="2025-11-25 21:43:11 +0000 UTC" firstStartedPulling="2025-11-25 21:43:11.767670237 +0000 UTC m=+747.230146569" lastFinishedPulling="2025-11-25 21:43:12.090656895 +0000 UTC m=+747.553133217" observedRunningTime="2025-11-25 21:43:13.000197422 +0000 UTC m=+748.462673754" watchObservedRunningTime="2025-11-25 21:43:13.000999713 +0000 UTC m=+748.463476035" Nov 25 21:43:13 crc kubenswrapper[4910]: I1125 21:43:13.002452 4910 scope.go:117] "RemoveContainer" containerID="8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c" Nov 25 21:43:13 crc kubenswrapper[4910]: E1125 21:43:13.003012 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c\": container with ID starting with 8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c not found: ID does not exist" containerID="8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c" Nov 25 21:43:13 crc kubenswrapper[4910]: I1125 21:43:13.003054 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c"} err="failed to get container status \"8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c\": rpc error: code = NotFound desc = could not find container \"8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c\": container with ID starting with 8fa0cf614d8a7df70c785a638116ecee0900e8f653647a1ae602f0e6ad22920c not found: ID does not exist" Nov 25 21:43:13 crc kubenswrapper[4910]: I1125 21:43:13.023097 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-99pj5"] Nov 25 21:43:13 crc kubenswrapper[4910]: I1125 21:43:13.026607 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-99pj5"] Nov 25 21:43:13 crc kubenswrapper[4910]: I1125 21:43:13.210459 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="39fb1b39-b714-4dbc-83f0-8ee115cc74a5" path="/var/lib/kubelet/pods/39fb1b39-b714-4dbc-83f0-8ee115cc74a5/volumes" Nov 25 21:43:21 crc kubenswrapper[4910]: I1125 21:43:21.527887 4910 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 25 21:43:21 crc kubenswrapper[4910]: I1125 21:43:21.555683 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-7lqkm" Nov 25 21:43:21 crc kubenswrapper[4910]: I1125 21:43:21.555767 4910 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack-operators/openstack-operator-index-7lqkm" Nov 25 21:43:21 crc kubenswrapper[4910]: I1125 21:43:21.618856 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-7lqkm" Nov 25 21:43:21 crc kubenswrapper[4910]: I1125 21:43:21.837591 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-qw4hv" Nov 25 21:43:22 crc kubenswrapper[4910]: I1125 21:43:22.096119 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-7lqkm" Nov 25 21:43:23 crc kubenswrapper[4910]: I1125 21:43:23.098460 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:43:23 crc kubenswrapper[4910]: I1125 21:43:23.098524 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:43:23 crc kubenswrapper[4910]: I1125 21:43:23.098579 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:43:23 crc kubenswrapper[4910]: I1125 21:43:23.099195 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1d449a51cc6d0f8601906171d97e528f4369d984db9458b4317c75e761fb730e"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 21:43:23 crc kubenswrapper[4910]: I1125 21:43:23.099270 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://1d449a51cc6d0f8601906171d97e528f4369d984db9458b4317c75e761fb730e" gracePeriod=600 Nov 25 21:43:24 crc kubenswrapper[4910]: I1125 21:43:24.067159 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="1d449a51cc6d0f8601906171d97e528f4369d984db9458b4317c75e761fb730e" exitCode=0 Nov 25 21:43:24 crc kubenswrapper[4910]: I1125 21:43:24.067206 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"1d449a51cc6d0f8601906171d97e528f4369d984db9458b4317c75e761fb730e"} Nov 25 21:43:24 crc kubenswrapper[4910]: I1125 21:43:24.067435 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"8368a57726af5a6b75ce9b9efb9fa3828db0cba5637cfb1aba6ea91ccf50acb2"} Nov 25 21:43:24 crc kubenswrapper[4910]: I1125 21:43:24.067458 4910 scope.go:117] "RemoveContainer" containerID="30286bee60e1a7a80129654be478335ca47bde695cc33c16b01fd38ede68a6b8" Nov 25 21:43:35 crc 
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.091504 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"]
Nov 25 21:43:35 crc kubenswrapper[4910]: E1125 21:43:35.092584 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="39fb1b39-b714-4dbc-83f0-8ee115cc74a5" containerName="registry-server"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.092607 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="39fb1b39-b714-4dbc-83f0-8ee115cc74a5" containerName="registry-server"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.092874 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="39fb1b39-b714-4dbc-83f0-8ee115cc74a5" containerName="registry-server"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.094381 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.097500 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-cpdd4"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.100476 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"]
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.169757 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-util\") pod \"9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.169831 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2svp\" (UniqueName: \"kubernetes.io/projected/770f8bcb-c718-44f0-9311-d3f1a782aed2-kube-api-access-h2svp\") pod \"9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.170057 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-bundle\") pod \"9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.272184 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-bundle\") pod \"9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.272361 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-util\") pod \"9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.272384 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2svp\" (UniqueName: \"kubernetes.io/projected/770f8bcb-c718-44f0-9311-d3f1a782aed2-kube-api-access-h2svp\") pod \"9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.272717 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-bundle\") pod \"9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.273062 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-util\") pod \"9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.296499 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2svp\" (UniqueName: \"kubernetes.io/projected/770f8bcb-c718-44f0-9311-d3f1a782aed2-kube-api-access-h2svp\") pod \"9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.414742 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"
Nov 25 21:43:35 crc kubenswrapper[4910]: I1125 21:43:35.704830 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp"]
Nov 25 21:43:36 crc kubenswrapper[4910]: I1125 21:43:36.161127 4910 generic.go:334] "Generic (PLEG): container finished" podID="770f8bcb-c718-44f0-9311-d3f1a782aed2" containerID="6b446406526f17290cdb66ea3744aaff7d65983ce6d097d9f9c2d2448c618cf5" exitCode=0
Nov 25 21:43:36 crc kubenswrapper[4910]: I1125 21:43:36.161184 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp" event={"ID":"770f8bcb-c718-44f0-9311-d3f1a782aed2","Type":"ContainerDied","Data":"6b446406526f17290cdb66ea3744aaff7d65983ce6d097d9f9c2d2448c618cf5"}
Nov 25 21:43:36 crc kubenswrapper[4910]: I1125 21:43:36.161511 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp" event={"ID":"770f8bcb-c718-44f0-9311-d3f1a782aed2","Type":"ContainerStarted","Data":"ebf4568b9dbedb4abf86544e73f6812370e92148f9d44dacd55aa0f7552bf5a7"}
Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.170508 4910 generic.go:334] "Generic (PLEG): container finished" podID="770f8bcb-c718-44f0-9311-d3f1a782aed2" containerID="f0be4263c95f1a999280cea3aa245aa1e47c6f2072f80519bd7cce56b2d6dc63" exitCode=0
Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.170586 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp" event={"ID":"770f8bcb-c718-44f0-9311-d3f1a782aed2","Type":"ContainerDied","Data":"f0be4263c95f1a999280cea3aa245aa1e47c6f2072f80519bd7cce56b2d6dc63"}
Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.651105 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dphj6"]
Need to start a new one" pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.658215 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dphj6"] Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.703205 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-catalog-content\") pod \"certified-operators-dphj6\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.703297 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-utilities\") pod \"certified-operators-dphj6\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.703333 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmblp\" (UniqueName: \"kubernetes.io/projected/423e842c-ba28-421a-8031-84d5b8d86c4f-kube-api-access-qmblp\") pod \"certified-operators-dphj6\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.804903 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-catalog-content\") pod \"certified-operators-dphj6\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.804992 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-utilities\") pod \"certified-operators-dphj6\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.805040 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmblp\" (UniqueName: \"kubernetes.io/projected/423e842c-ba28-421a-8031-84d5b8d86c4f-kube-api-access-qmblp\") pod \"certified-operators-dphj6\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.805544 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-catalog-content\") pod \"certified-operators-dphj6\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.805614 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-utilities\") pod \"certified-operators-dphj6\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:37 crc kubenswrapper[4910]: I1125 21:43:37.827988 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-qmblp\" (UniqueName: \"kubernetes.io/projected/423e842c-ba28-421a-8031-84d5b8d86c4f-kube-api-access-qmblp\") pod \"certified-operators-dphj6\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:38 crc kubenswrapper[4910]: I1125 21:43:38.027802 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:38 crc kubenswrapper[4910]: I1125 21:43:38.192957 4910 generic.go:334] "Generic (PLEG): container finished" podID="770f8bcb-c718-44f0-9311-d3f1a782aed2" containerID="b635d0153792746824aaa7dccfdef04fa415d210e2e7a423e8e5b93ad925e3d5" exitCode=0 Nov 25 21:43:38 crc kubenswrapper[4910]: I1125 21:43:38.193119 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp" event={"ID":"770f8bcb-c718-44f0-9311-d3f1a782aed2","Type":"ContainerDied","Data":"b635d0153792746824aaa7dccfdef04fa415d210e2e7a423e8e5b93ad925e3d5"} Nov 25 21:43:38 crc kubenswrapper[4910]: I1125 21:43:38.269653 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dphj6"] Nov 25 21:43:38 crc kubenswrapper[4910]: W1125 21:43:38.274564 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod423e842c_ba28_421a_8031_84d5b8d86c4f.slice/crio-fca24ea57aa441e91065e6fd1c5525018fb7f3159ee3e5364fdbd00161dab174 WatchSource:0}: Error finding container fca24ea57aa441e91065e6fd1c5525018fb7f3159ee3e5364fdbd00161dab174: Status 404 returned error can't find the container with id fca24ea57aa441e91065e6fd1c5525018fb7f3159ee3e5364fdbd00161dab174 Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.205702 4910 generic.go:334] "Generic (PLEG): container finished" podID="423e842c-ba28-421a-8031-84d5b8d86c4f" containerID="74f2710b16b0d96cbd5335763e9d05781b46e45f117cd1f7561145f82b200932" exitCode=0 Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.215303 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dphj6" event={"ID":"423e842c-ba28-421a-8031-84d5b8d86c4f","Type":"ContainerDied","Data":"74f2710b16b0d96cbd5335763e9d05781b46e45f117cd1f7561145f82b200932"} Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.215361 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dphj6" event={"ID":"423e842c-ba28-421a-8031-84d5b8d86c4f","Type":"ContainerStarted","Data":"fca24ea57aa441e91065e6fd1c5525018fb7f3159ee3e5364fdbd00161dab174"} Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.446929 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp" Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.530620 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-bundle\") pod \"770f8bcb-c718-44f0-9311-d3f1a782aed2\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.530779 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-util\") pod \"770f8bcb-c718-44f0-9311-d3f1a782aed2\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.530844 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2svp\" (UniqueName: \"kubernetes.io/projected/770f8bcb-c718-44f0-9311-d3f1a782aed2-kube-api-access-h2svp\") pod \"770f8bcb-c718-44f0-9311-d3f1a782aed2\" (UID: \"770f8bcb-c718-44f0-9311-d3f1a782aed2\") " Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.533131 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-bundle" (OuterVolumeSpecName: "bundle") pod "770f8bcb-c718-44f0-9311-d3f1a782aed2" (UID: "770f8bcb-c718-44f0-9311-d3f1a782aed2"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.538366 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/770f8bcb-c718-44f0-9311-d3f1a782aed2-kube-api-access-h2svp" (OuterVolumeSpecName: "kube-api-access-h2svp") pod "770f8bcb-c718-44f0-9311-d3f1a782aed2" (UID: "770f8bcb-c718-44f0-9311-d3f1a782aed2"). InnerVolumeSpecName "kube-api-access-h2svp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.548678 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-util" (OuterVolumeSpecName: "util") pod "770f8bcb-c718-44f0-9311-d3f1a782aed2" (UID: "770f8bcb-c718-44f0-9311-d3f1a782aed2"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.632316 4910 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-util\") on node \"crc\" DevicePath \"\"" Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.632374 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2svp\" (UniqueName: \"kubernetes.io/projected/770f8bcb-c718-44f0-9311-d3f1a782aed2-kube-api-access-h2svp\") on node \"crc\" DevicePath \"\"" Nov 25 21:43:39 crc kubenswrapper[4910]: I1125 21:43:39.632394 4910 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/770f8bcb-c718-44f0-9311-d3f1a782aed2-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:43:40 crc kubenswrapper[4910]: I1125 21:43:40.217854 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp" event={"ID":"770f8bcb-c718-44f0-9311-d3f1a782aed2","Type":"ContainerDied","Data":"ebf4568b9dbedb4abf86544e73f6812370e92148f9d44dacd55aa0f7552bf5a7"} Nov 25 21:43:40 crc kubenswrapper[4910]: I1125 21:43:40.217928 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp" Nov 25 21:43:40 crc kubenswrapper[4910]: I1125 21:43:40.218457 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ebf4568b9dbedb4abf86544e73f6812370e92148f9d44dacd55aa0f7552bf5a7" Nov 25 21:43:41 crc kubenswrapper[4910]: I1125 21:43:41.228336 4910 generic.go:334] "Generic (PLEG): container finished" podID="423e842c-ba28-421a-8031-84d5b8d86c4f" containerID="8ca2d86e9bcec2f798a1131f7f2c38ed1a460ae1c06bc3d14e04dc2ec1d3a716" exitCode=0 Nov 25 21:43:41 crc kubenswrapper[4910]: I1125 21:43:41.228463 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dphj6" event={"ID":"423e842c-ba28-421a-8031-84d5b8d86c4f","Type":"ContainerDied","Data":"8ca2d86e9bcec2f798a1131f7f2c38ed1a460ae1c06bc3d14e04dc2ec1d3a716"} Nov 25 21:43:42 crc kubenswrapper[4910]: I1125 21:43:42.236936 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dphj6" event={"ID":"423e842c-ba28-421a-8031-84d5b8d86c4f","Type":"ContainerStarted","Data":"48165d29b89d3dc4b12f52ad8e379cca895d51d271f07027b4adfdb570f86085"} Nov 25 21:43:42 crc kubenswrapper[4910]: I1125 21:43:42.255000 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-dphj6" podStartSLOduration=2.79527374 podStartE2EDuration="5.254981227s" podCreationTimestamp="2025-11-25 21:43:37 +0000 UTC" firstStartedPulling="2025-11-25 21:43:39.209934135 +0000 UTC m=+774.672410457" lastFinishedPulling="2025-11-25 21:43:41.669641592 +0000 UTC m=+777.132117944" observedRunningTime="2025-11-25 21:43:42.254139554 +0000 UTC m=+777.716615886" watchObservedRunningTime="2025-11-25 21:43:42.254981227 +0000 UTC m=+777.717457559" Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.448806 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5"] Nov 25 21:43:46 crc kubenswrapper[4910]: E1125 21:43:46.449372 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="770f8bcb-c718-44f0-9311-d3f1a782aed2" 
containerName="pull" Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.449386 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="770f8bcb-c718-44f0-9311-d3f1a782aed2" containerName="pull" Nov 25 21:43:46 crc kubenswrapper[4910]: E1125 21:43:46.449405 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="770f8bcb-c718-44f0-9311-d3f1a782aed2" containerName="extract" Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.449411 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="770f8bcb-c718-44f0-9311-d3f1a782aed2" containerName="extract" Nov 25 21:43:46 crc kubenswrapper[4910]: E1125 21:43:46.449421 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="770f8bcb-c718-44f0-9311-d3f1a782aed2" containerName="util" Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.449428 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="770f8bcb-c718-44f0-9311-d3f1a782aed2" containerName="util" Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.449551 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="770f8bcb-c718-44f0-9311-d3f1a782aed2" containerName="extract" Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.449960 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5" Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.452074 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-8cxx8" Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.481010 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5"] Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.524549 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zgd67\" (UniqueName: \"kubernetes.io/projected/207712f3-d06c-435f-9a0d-f6a895ee4578-kube-api-access-zgd67\") pod \"openstack-operator-controller-operator-5475b86485-8h9j5\" (UID: \"207712f3-d06c-435f-9a0d-f6a895ee4578\") " pod="openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5" Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.626019 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zgd67\" (UniqueName: \"kubernetes.io/projected/207712f3-d06c-435f-9a0d-f6a895ee4578-kube-api-access-zgd67\") pod \"openstack-operator-controller-operator-5475b86485-8h9j5\" (UID: \"207712f3-d06c-435f-9a0d-f6a895ee4578\") " pod="openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5" Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.651700 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zgd67\" (UniqueName: \"kubernetes.io/projected/207712f3-d06c-435f-9a0d-f6a895ee4578-kube-api-access-zgd67\") pod \"openstack-operator-controller-operator-5475b86485-8h9j5\" (UID: \"207712f3-d06c-435f-9a0d-f6a895ee4578\") " pod="openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5" Nov 25 21:43:46 crc kubenswrapper[4910]: I1125 21:43:46.767320 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5" Nov 25 21:43:47 crc kubenswrapper[4910]: I1125 21:43:47.248637 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5"] Nov 25 21:43:47 crc kubenswrapper[4910]: I1125 21:43:47.283165 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5" event={"ID":"207712f3-d06c-435f-9a0d-f6a895ee4578","Type":"ContainerStarted","Data":"2a7264cedcba79579d65dcdf8eb4b04455d367fbc1a5d18e1d473d4359696b96"} Nov 25 21:43:48 crc kubenswrapper[4910]: I1125 21:43:48.028919 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:48 crc kubenswrapper[4910]: I1125 21:43:48.028989 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:48 crc kubenswrapper[4910]: I1125 21:43:48.082080 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:48 crc kubenswrapper[4910]: I1125 21:43:48.336039 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:50 crc kubenswrapper[4910]: I1125 21:43:50.035052 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dphj6"] Nov 25 21:43:50 crc kubenswrapper[4910]: I1125 21:43:50.308431 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dphj6" podUID="423e842c-ba28-421a-8031-84d5b8d86c4f" containerName="registry-server" containerID="cri-o://48165d29b89d3dc4b12f52ad8e379cca895d51d271f07027b4adfdb570f86085" gracePeriod=2 Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.330905 4910 generic.go:334] "Generic (PLEG): container finished" podID="423e842c-ba28-421a-8031-84d5b8d86c4f" containerID="48165d29b89d3dc4b12f52ad8e379cca895d51d271f07027b4adfdb570f86085" exitCode=0 Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.331020 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dphj6" event={"ID":"423e842c-ba28-421a-8031-84d5b8d86c4f","Type":"ContainerDied","Data":"48165d29b89d3dc4b12f52ad8e379cca895d51d271f07027b4adfdb570f86085"} Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.474946 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.515439 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-catalog-content\") pod \"423e842c-ba28-421a-8031-84d5b8d86c4f\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.515704 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-utilities\") pod \"423e842c-ba28-421a-8031-84d5b8d86c4f\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.515806 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qmblp\" (UniqueName: \"kubernetes.io/projected/423e842c-ba28-421a-8031-84d5b8d86c4f-kube-api-access-qmblp\") pod \"423e842c-ba28-421a-8031-84d5b8d86c4f\" (UID: \"423e842c-ba28-421a-8031-84d5b8d86c4f\") " Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.516587 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-utilities" (OuterVolumeSpecName: "utilities") pod "423e842c-ba28-421a-8031-84d5b8d86c4f" (UID: "423e842c-ba28-421a-8031-84d5b8d86c4f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.530522 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/423e842c-ba28-421a-8031-84d5b8d86c4f-kube-api-access-qmblp" (OuterVolumeSpecName: "kube-api-access-qmblp") pod "423e842c-ba28-421a-8031-84d5b8d86c4f" (UID: "423e842c-ba28-421a-8031-84d5b8d86c4f"). InnerVolumeSpecName "kube-api-access-qmblp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.566054 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "423e842c-ba28-421a-8031-84d5b8d86c4f" (UID: "423e842c-ba28-421a-8031-84d5b8d86c4f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.617805 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.618060 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qmblp\" (UniqueName: \"kubernetes.io/projected/423e842c-ba28-421a-8031-84d5b8d86c4f-kube-api-access-qmblp\") on node \"crc\" DevicePath \"\"" Nov 25 21:43:51 crc kubenswrapper[4910]: I1125 21:43:51.618073 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/423e842c-ba28-421a-8031-84d5b8d86c4f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:43:52 crc kubenswrapper[4910]: I1125 21:43:52.340768 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dphj6" event={"ID":"423e842c-ba28-421a-8031-84d5b8d86c4f","Type":"ContainerDied","Data":"fca24ea57aa441e91065e6fd1c5525018fb7f3159ee3e5364fdbd00161dab174"} Nov 25 21:43:52 crc kubenswrapper[4910]: I1125 21:43:52.340834 4910 scope.go:117] "RemoveContainer" containerID="48165d29b89d3dc4b12f52ad8e379cca895d51d271f07027b4adfdb570f86085" Nov 25 21:43:52 crc kubenswrapper[4910]: I1125 21:43:52.340981 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dphj6" Nov 25 21:43:52 crc kubenswrapper[4910]: I1125 21:43:52.344970 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5" event={"ID":"207712f3-d06c-435f-9a0d-f6a895ee4578","Type":"ContainerStarted","Data":"e5d06bc68be0a54b4a60435b21c84c78b6d95c13c8ce26a2c8c5b04a5aab8a37"} Nov 25 21:43:52 crc kubenswrapper[4910]: I1125 21:43:52.345189 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5" Nov 25 21:43:52 crc kubenswrapper[4910]: I1125 21:43:52.366201 4910 scope.go:117] "RemoveContainer" containerID="8ca2d86e9bcec2f798a1131f7f2c38ed1a460ae1c06bc3d14e04dc2ec1d3a716" Nov 25 21:43:52 crc kubenswrapper[4910]: I1125 21:43:52.387722 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5" podStartSLOduration=2.132518262 podStartE2EDuration="6.387700656s" podCreationTimestamp="2025-11-25 21:43:46 +0000 UTC" firstStartedPulling="2025-11-25 21:43:47.256880797 +0000 UTC m=+782.719357129" lastFinishedPulling="2025-11-25 21:43:51.512063201 +0000 UTC m=+786.974539523" observedRunningTime="2025-11-25 21:43:52.372805785 +0000 UTC m=+787.835282107" watchObservedRunningTime="2025-11-25 21:43:52.387700656 +0000 UTC m=+787.850176988" Nov 25 21:43:52 crc kubenswrapper[4910]: I1125 21:43:52.392815 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dphj6"] Nov 25 21:43:52 crc kubenswrapper[4910]: I1125 21:43:52.397108 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dphj6"] Nov 25 21:43:52 crc kubenswrapper[4910]: I1125 21:43:52.408053 4910 scope.go:117] "RemoveContainer" containerID="74f2710b16b0d96cbd5335763e9d05781b46e45f117cd1f7561145f82b200932" Nov 25 21:43:53 crc kubenswrapper[4910]: I1125 
21:43:53.212851 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="423e842c-ba28-421a-8031-84d5b8d86c4f" path="/var/lib/kubelet/pods/423e842c-ba28-421a-8031-84d5b8d86c4f/volumes" Nov 25 21:43:56 crc kubenswrapper[4910]: I1125 21:43:56.771143 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-5475b86485-8h9j5" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.155951 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx"] Nov 25 21:44:26 crc kubenswrapper[4910]: E1125 21:44:26.156840 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="423e842c-ba28-421a-8031-84d5b8d86c4f" containerName="registry-server" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.156860 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="423e842c-ba28-421a-8031-84d5b8d86c4f" containerName="registry-server" Nov 25 21:44:26 crc kubenswrapper[4910]: E1125 21:44:26.156881 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="423e842c-ba28-421a-8031-84d5b8d86c4f" containerName="extract-content" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.156890 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="423e842c-ba28-421a-8031-84d5b8d86c4f" containerName="extract-content" Nov 25 21:44:26 crc kubenswrapper[4910]: E1125 21:44:26.156901 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="423e842c-ba28-421a-8031-84d5b8d86c4f" containerName="extract-utilities" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.156910 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="423e842c-ba28-421a-8031-84d5b8d86c4f" containerName="extract-utilities" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.157038 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="423e842c-ba28-421a-8031-84d5b8d86c4f" containerName="registry-server" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.160353 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.167983 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-9tznd" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.189123 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.191847 4910 util.go:30] "No sandbox for pod can be found. 
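The "Cleaned up orphaned pod volumes dir" entry above removes the leftover /var/lib/kubelet/pods/<uid>/volumes directory for a deleted pod once its volumes are all unmounted. A sketch of such a sweep; cleanupOrphanedPodVolumes is an invented name, and os.Remove succeeding only on an empty directory stands in for the "everything already unmounted" check:

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // cleanupOrphanedPodVolumes removes <root>/<uid>/volumes for pod UIDs that
    // are no longer active.
    func cleanupOrphanedPodVolumes(root string, active map[string]bool) {
    	entries, err := os.ReadDir(root)
    	if err != nil {
    		return
    	}
    	for _, e := range entries {
    		if !e.IsDir() || active[e.Name()] {
    			continue
    		}
    		dir := filepath.Join(root, e.Name(), "volumes")
    		if err := os.Remove(dir); err == nil { // only succeeds when empty
    			fmt.Printf("Cleaned up orphaned pod volumes dir podUID=%q path=%q\n", e.Name(), dir)
    		}
    	}
    }

    func main() {
    	root, _ := os.MkdirTemp("", "pods") // stand-in for /var/lib/kubelet/pods
    	_ = os.MkdirAll(filepath.Join(root, "dead-uid", "volumes"), 0o755)
    	cleanupOrphanedPodVolumes(root, map[string]bool{})
    }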
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.202664 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.213800 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-ddm65" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.226989 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.232047 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.240834 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2h7kq\" (UniqueName: \"kubernetes.io/projected/ef4b8019-398c-453d-9b78-71c340bf2bdd-kube-api-access-2h7kq\") pod \"cinder-operator-controller-manager-859b6ccc6-gvsd5\" (UID: \"ef4b8019-398c-453d-9b78-71c340bf2bdd\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.240904 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwz76\" (UniqueName: \"kubernetes.io/projected/22c915a2-80bf-454b-b0e6-7a5bbafec7a5-kube-api-access-kwz76\") pod \"barbican-operator-controller-manager-7d9dfd778-x58hx\" (UID: \"22c915a2-80bf-454b-b0e6-7a5bbafec7a5\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.248323 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-rrcwp" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.258473 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.282300 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.298171 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-776b995c47-chsbs"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.299591 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-776b995c47-chsbs" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.306857 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-qphfb" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.307190 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.311657 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.323699 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-297gb" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.331315 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-776b995c47-chsbs"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.342917 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2h7kq\" (UniqueName: \"kubernetes.io/projected/ef4b8019-398c-453d-9b78-71c340bf2bdd-kube-api-access-2h7kq\") pod \"cinder-operator-controller-manager-859b6ccc6-gvsd5\" (UID: \"ef4b8019-398c-453d-9b78-71c340bf2bdd\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.342988 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwz76\" (UniqueName: \"kubernetes.io/projected/22c915a2-80bf-454b-b0e6-7a5bbafec7a5-kube-api-access-kwz76\") pod \"barbican-operator-controller-manager-7d9dfd778-x58hx\" (UID: \"22c915a2-80bf-454b-b0e6-7a5bbafec7a5\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.343053 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2lpd\" (UniqueName: \"kubernetes.io/projected/cd608fcb-14bd-424e-9f6e-c0eea37397ea-kube-api-access-v2lpd\") pod \"designate-operator-controller-manager-78b4bc895b-gbgd2\" (UID: \"cd608fcb-14bd-424e-9f6e-c0eea37397ea\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.371393 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.373003 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.375102 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.398036 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-n25kb" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.403741 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2h7kq\" (UniqueName: \"kubernetes.io/projected/ef4b8019-398c-453d-9b78-71c340bf2bdd-kube-api-access-2h7kq\") pod \"cinder-operator-controller-manager-859b6ccc6-gvsd5\" (UID: \"ef4b8019-398c-453d-9b78-71c340bf2bdd\") " pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.405374 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwz76\" (UniqueName: \"kubernetes.io/projected/22c915a2-80bf-454b-b0e6-7a5bbafec7a5-kube-api-access-kwz76\") pod \"barbican-operator-controller-manager-7d9dfd778-x58hx\" (UID: \"22c915a2-80bf-454b-b0e6-7a5bbafec7a5\") " pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.411381 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.412600 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.416647 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.422792 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.429142 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.429337 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-8lfq4" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.443110 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.444294 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2lpd\" (UniqueName: \"kubernetes.io/projected/cd608fcb-14bd-424e-9f6e-c0eea37397ea-kube-api-access-v2lpd\") pod \"designate-operator-controller-manager-78b4bc895b-gbgd2\" (UID: \"cd608fcb-14bd-424e-9f6e-c0eea37397ea\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.444340 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnnr2\" (UniqueName: \"kubernetes.io/projected/28574aa2-4470-4432-b7f0-4b3b52b5f8b9-kube-api-access-vnnr2\") 
pod \"heat-operator-controller-manager-5f64f6f8bb-vwgmq\" (UID: \"28574aa2-4470-4432-b7f0-4b3b52b5f8b9\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.444398 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdclp\" (UniqueName: \"kubernetes.io/projected/6c888013-ea9c-433c-973f-af7c5c22f8c9-kube-api-access-pdclp\") pod \"horizon-operator-controller-manager-68c6d99b8f-pqkz6\" (UID: \"6c888013-ea9c-433c-973f-af7c5c22f8c9\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.444420 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwqgv\" (UniqueName: \"kubernetes.io/projected/bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9-kube-api-access-hwqgv\") pod \"glance-operator-controller-manager-776b995c47-chsbs\" (UID: \"bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9\") " pod="openstack-operators/glance-operator-controller-manager-776b995c47-chsbs" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.444649 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.450439 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.450523 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-ncpqq" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.451413 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.453976 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-2jpd5" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.476865 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2lpd\" (UniqueName: \"kubernetes.io/projected/cd608fcb-14bd-424e-9f6e-c0eea37397ea-kube-api-access-v2lpd\") pod \"designate-operator-controller-manager-78b4bc895b-gbgd2\" (UID: \"cd608fcb-14bd-424e-9f6e-c0eea37397ea\") " pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.476931 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.477953 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.479998 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-2gx7b" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.485165 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.490060 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.493101 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.517776 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.522834 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.525526 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.535097 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-v6zz4" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.545546 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert\") pod \"infra-operator-controller-manager-57548d458d-9mx4m\" (UID: \"0345d3a7-45fa-4bce-8dcb-4bef18de4b21\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.545594 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qc9dc\" (UniqueName: \"kubernetes.io/projected/2cce005e-33cd-4b63-8798-b0b7eb53ba73-kube-api-access-qc9dc\") pod \"ironic-operator-controller-manager-6c548fd776-q9hg4\" (UID: \"2cce005e-33cd-4b63-8798-b0b7eb53ba73\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.545650 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnnr2\" (UniqueName: \"kubernetes.io/projected/28574aa2-4470-4432-b7f0-4b3b52b5f8b9-kube-api-access-vnnr2\") pod \"heat-operator-controller-manager-5f64f6f8bb-vwgmq\" (UID: \"28574aa2-4470-4432-b7f0-4b3b52b5f8b9\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.545670 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q28tf\" (UniqueName: \"kubernetes.io/projected/c27ac874-a062-4342-9559-a14acbff4c9d-kube-api-access-q28tf\") pod \"keystone-operator-controller-manager-546d4bdf48-4hf76\" (UID: \"c27ac874-a062-4342-9559-a14acbff4c9d\") " 
pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.545708 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wsblx\" (UniqueName: \"kubernetes.io/projected/5c6fa310-d85a-4ac3-be15-478635a8c221-kube-api-access-wsblx\") pod \"manila-operator-controller-manager-6546668bfd-z9dd7\" (UID: \"5c6fa310-d85a-4ac3-be15-478635a8c221\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.545730 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdclp\" (UniqueName: \"kubernetes.io/projected/6c888013-ea9c-433c-973f-af7c5c22f8c9-kube-api-access-pdclp\") pod \"horizon-operator-controller-manager-68c6d99b8f-pqkz6\" (UID: \"6c888013-ea9c-433c-973f-af7c5c22f8c9\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.545748 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwqgv\" (UniqueName: \"kubernetes.io/projected/bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9-kube-api-access-hwqgv\") pod \"glance-operator-controller-manager-776b995c47-chsbs\" (UID: \"bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9\") " pod="openstack-operators/glance-operator-controller-manager-776b995c47-chsbs" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.545767 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spjs6\" (UniqueName: \"kubernetes.io/projected/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-kube-api-access-spjs6\") pod \"infra-operator-controller-manager-57548d458d-9mx4m\" (UID: \"0345d3a7-45fa-4bce-8dcb-4bef18de4b21\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.549719 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.559289 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.560380 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.565070 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.565380 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.565743 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-8m5st" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.572282 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.576613 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnnr2\" (UniqueName: \"kubernetes.io/projected/28574aa2-4470-4432-b7f0-4b3b52b5f8b9-kube-api-access-vnnr2\") pod \"heat-operator-controller-manager-5f64f6f8bb-vwgmq\" (UID: \"28574aa2-4470-4432-b7f0-4b3b52b5f8b9\") " pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.579187 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.580633 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.581915 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-xfh4n" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.586667 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.591007 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.592366 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.592951 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdclp\" (UniqueName: \"kubernetes.io/projected/6c888013-ea9c-433c-973f-af7c5c22f8c9-kube-api-access-pdclp\") pod \"horizon-operator-controller-manager-68c6d99b8f-pqkz6\" (UID: \"6c888013-ea9c-433c-973f-af7c5c22f8c9\") " pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.593490 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwqgv\" (UniqueName: \"kubernetes.io/projected/bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9-kube-api-access-hwqgv\") pod \"glance-operator-controller-manager-776b995c47-chsbs\" (UID: \"bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9\") " pod="openstack-operators/glance-operator-controller-manager-776b995c47-chsbs" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.599151 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.629130 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.635854 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-m4b56" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.645515 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-776b995c47-chsbs" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.647074 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvf95\" (UniqueName: \"kubernetes.io/projected/44442e02-9c1f-4a6e-bcdd-237b8260638d-kube-api-access-rvf95\") pod \"mariadb-operator-controller-manager-56bbcc9d85-tkpxw\" (UID: \"44442e02-9c1f-4a6e-bcdd-237b8260638d\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.647122 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q28tf\" (UniqueName: \"kubernetes.io/projected/c27ac874-a062-4342-9559-a14acbff4c9d-kube-api-access-q28tf\") pod \"keystone-operator-controller-manager-546d4bdf48-4hf76\" (UID: \"c27ac874-a062-4342-9559-a14acbff4c9d\") " pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.647155 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvnth\" (UniqueName: \"kubernetes.io/projected/ad233905-ebeb-4698-8261-d8a395be75d7-kube-api-access-kvnth\") pod \"nova-operator-controller-manager-555bbdd45-7f94x\" (UID: \"ad233905-ebeb-4698-8261-d8a395be75d7\") " pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.647189 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wsblx\" (UniqueName: \"kubernetes.io/projected/5c6fa310-d85a-4ac3-be15-478635a8c221-kube-api-access-wsblx\") pod 
\"manila-operator-controller-manager-6546668bfd-z9dd7\" (UID: \"5c6fa310-d85a-4ac3-be15-478635a8c221\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.647223 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spjs6\" (UniqueName: \"kubernetes.io/projected/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-kube-api-access-spjs6\") pod \"infra-operator-controller-manager-57548d458d-9mx4m\" (UID: \"0345d3a7-45fa-4bce-8dcb-4bef18de4b21\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.647283 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6r4v\" (UniqueName: \"kubernetes.io/projected/eadae0d9-eee7-42f3-aa0e-c42ef3282f24-kube-api-access-k6r4v\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-t4qfc\" (UID: \"eadae0d9-eee7-42f3-aa0e-c42ef3282f24\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.647305 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert\") pod \"infra-operator-controller-manager-57548d458d-9mx4m\" (UID: \"0345d3a7-45fa-4bce-8dcb-4bef18de4b21\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.647326 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qc9dc\" (UniqueName: \"kubernetes.io/projected/2cce005e-33cd-4b63-8798-b0b7eb53ba73-kube-api-access-qc9dc\") pod \"ironic-operator-controller-manager-6c548fd776-q9hg4\" (UID: \"2cce005e-33cd-4b63-8798-b0b7eb53ba73\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.647361 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhxx6\" (UniqueName: \"kubernetes.io/projected/2c94f244-a036-47af-8ba4-5dfe41ad5e66-kube-api-access-fhxx6\") pod \"octavia-operator-controller-manager-64cdc6ff96-czf4h\" (UID: \"2c94f244-a036-47af-8ba4-5dfe41ad5e66\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h" Nov 25 21:44:26 crc kubenswrapper[4910]: E1125 21:44:26.650349 4910 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 21:44:26 crc kubenswrapper[4910]: E1125 21:44:26.651699 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert podName:0345d3a7-45fa-4bce-8dcb-4bef18de4b21 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:27.151674141 +0000 UTC m=+822.614150463 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert") pod "infra-operator-controller-manager-57548d458d-9mx4m" (UID: "0345d3a7-45fa-4bce-8dcb-4bef18de4b21") : secret "infra-operator-webhook-server-cert" not found Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.656187 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.663651 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.663996 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-mwcjb" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.665804 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.680692 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q28tf\" (UniqueName: \"kubernetes.io/projected/c27ac874-a062-4342-9559-a14acbff4c9d-kube-api-access-q28tf\") pod \"keystone-operator-controller-manager-546d4bdf48-4hf76\" (UID: \"c27ac874-a062-4342-9559-a14acbff4c9d\") " pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.680921 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spjs6\" (UniqueName: \"kubernetes.io/projected/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-kube-api-access-spjs6\") pod \"infra-operator-controller-manager-57548d458d-9mx4m\" (UID: \"0345d3a7-45fa-4bce-8dcb-4bef18de4b21\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.690594 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wsblx\" (UniqueName: \"kubernetes.io/projected/5c6fa310-d85a-4ac3-be15-478635a8c221-kube-api-access-wsblx\") pod \"manila-operator-controller-manager-6546668bfd-z9dd7\" (UID: \"5c6fa310-d85a-4ac3-be15-478635a8c221\") " pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.691476 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qc9dc\" (UniqueName: \"kubernetes.io/projected/2cce005e-33cd-4b63-8798-b0b7eb53ba73-kube-api-access-qc9dc\") pod \"ironic-operator-controller-manager-6c548fd776-q9hg4\" (UID: \"2cce005e-33cd-4b63-8798-b0b7eb53ba73\") " pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.693188 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.694568 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.696741 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-7cwzc" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.709033 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.722875 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.726318 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.727773 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.729379 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.732518 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-2xlmt" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.749229 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert\") pod \"openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd\" (UID: \"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.749365 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6r4v\" (UniqueName: \"kubernetes.io/projected/eadae0d9-eee7-42f3-aa0e-c42ef3282f24-kube-api-access-k6r4v\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-t4qfc\" (UID: \"eadae0d9-eee7-42f3-aa0e-c42ef3282f24\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.749426 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhxx6\" (UniqueName: \"kubernetes.io/projected/2c94f244-a036-47af-8ba4-5dfe41ad5e66-kube-api-access-fhxx6\") pod \"octavia-operator-controller-manager-64cdc6ff96-czf4h\" (UID: \"2c94f244-a036-47af-8ba4-5dfe41ad5e66\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.749448 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvf95\" (UniqueName: \"kubernetes.io/projected/44442e02-9c1f-4a6e-bcdd-237b8260638d-kube-api-access-rvf95\") pod \"mariadb-operator-controller-manager-56bbcc9d85-tkpxw\" (UID: \"44442e02-9c1f-4a6e-bcdd-237b8260638d\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.749495 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-4cbtw\" (UniqueName: \"kubernetes.io/projected/5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c-kube-api-access-4cbtw\") pod \"ovn-operator-controller-manager-6c4d7c9757-gpg9j\" (UID: \"5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c\") " pod="openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.749521 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvnth\" (UniqueName: \"kubernetes.io/projected/ad233905-ebeb-4698-8261-d8a395be75d7-kube-api-access-kvnth\") pod \"nova-operator-controller-manager-555bbdd45-7f94x\" (UID: \"ad233905-ebeb-4698-8261-d8a395be75d7\") " pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.749543 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd8rx\" (UniqueName: \"kubernetes.io/projected/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-kube-api-access-sd8rx\") pod \"openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd\" (UID: \"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.750379 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.752195 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.753469 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.755353 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-4zcln" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.770638 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.771765 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.777129 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-jk89v" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.779989 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvnth\" (UniqueName: \"kubernetes.io/projected/ad233905-ebeb-4698-8261-d8a395be75d7-kube-api-access-kvnth\") pod \"nova-operator-controller-manager-555bbdd45-7f94x\" (UID: \"ad233905-ebeb-4698-8261-d8a395be75d7\") " pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.782655 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.791829 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.800099 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.801187 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.801488 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhxx6\" (UniqueName: \"kubernetes.io/projected/2c94f244-a036-47af-8ba4-5dfe41ad5e66-kube-api-access-fhxx6\") pod \"octavia-operator-controller-manager-64cdc6ff96-czf4h\" (UID: \"2c94f244-a036-47af-8ba4-5dfe41ad5e66\") " pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.806199 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-ztmfv" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.806528 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.811666 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvf95\" (UniqueName: \"kubernetes.io/projected/44442e02-9c1f-4a6e-bcdd-237b8260638d-kube-api-access-rvf95\") pod \"mariadb-operator-controller-manager-56bbcc9d85-tkpxw\" (UID: \"44442e02-9c1f-4a6e-bcdd-237b8260638d\") " pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.815685 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6r4v\" (UniqueName: \"kubernetes.io/projected/eadae0d9-eee7-42f3-aa0e-c42ef3282f24-kube-api-access-k6r4v\") pod \"neutron-operator-controller-manager-5fdfd5b6b5-t4qfc\" (UID: \"eadae0d9-eee7-42f3-aa0e-c42ef3282f24\") " pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.828727 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.850425 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2d8gb\" (UniqueName: \"kubernetes.io/projected/e117033c-b566-4c46-bd57-9e173e88a224-kube-api-access-2d8gb\") pod \"telemetry-operator-controller-manager-76cc84c6bb-6zxdg\" (UID: \"e117033c-b566-4c46-bd57-9e173e88a224\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.850494 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert\") pod \"openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd\" (UID: \"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.850647 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h89m4\" (UniqueName: \"kubernetes.io/projected/bb7d559d-1779-400f-b556-1adbb0c61b60-kube-api-access-h89m4\") pod \"test-operator-controller-manager-74fcfc6d4b-2jdvk\" (UID: \"bb7d559d-1779-400f-b556-1adbb0c61b60\") " pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.850682 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sv526\" (UniqueName: \"kubernetes.io/projected/f3384730-e8d8-4e36-9f3e-8e2dbf3176cb-kube-api-access-sv526\") pod \"placement-operator-controller-manager-78f8948974-wsd5q\" (UID: \"f3384730-e8d8-4e36-9f3e-8e2dbf3176cb\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.850709 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vl6vd\" (UniqueName: \"kubernetes.io/projected/ba6c852e-59d0-4e5a-8967-3502457d62ec-kube-api-access-vl6vd\") pod \"swift-operator-controller-manager-5f8c65bbfc-wc77r\" (UID: \"ba6c852e-59d0-4e5a-8967-3502457d62ec\") " 
pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.850734 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cbtw\" (UniqueName: \"kubernetes.io/projected/5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c-kube-api-access-4cbtw\") pod \"ovn-operator-controller-manager-6c4d7c9757-gpg9j\" (UID: \"5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c\") " pod="openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.850806 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd8rx\" (UniqueName: \"kubernetes.io/projected/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-kube-api-access-sd8rx\") pod \"openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd\" (UID: \"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:26 crc kubenswrapper[4910]: E1125 21:44:26.851823 4910 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 21:44:26 crc kubenswrapper[4910]: E1125 21:44:26.851874 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert podName:65280dcb-6ac6-443b-88f0-7d3b0dadb4f8 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:27.351860146 +0000 UTC m=+822.814336468 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert") pod "openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" (UID: "65280dcb-6ac6-443b-88f0-7d3b0dadb4f8") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.875680 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cbtw\" (UniqueName: \"kubernetes.io/projected/5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c-kube-api-access-4cbtw\") pod \"ovn-operator-controller-manager-6c4d7c9757-gpg9j\" (UID: \"5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c\") " pod="openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.875790 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd8rx\" (UniqueName: \"kubernetes.io/projected/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-kube-api-access-sd8rx\") pod \"openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd\" (UID: \"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.876080 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.884743 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.892160 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.897494 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-v2dcz" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.902869 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.917926 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.932792 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.952892 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2d8gb\" (UniqueName: \"kubernetes.io/projected/e117033c-b566-4c46-bd57-9e173e88a224-kube-api-access-2d8gb\") pod \"telemetry-operator-controller-manager-76cc84c6bb-6zxdg\" (UID: \"e117033c-b566-4c46-bd57-9e173e88a224\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.952944 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnrr7\" (UniqueName: \"kubernetes.io/projected/0ac69701-2b03-4d80-bb8f-8f46acb193e4-kube-api-access-bnrr7\") pod \"watcher-operator-controller-manager-656dcb59d4-g9p8x\" (UID: \"0ac69701-2b03-4d80-bb8f-8f46acb193e4\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.953023 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h89m4\" (UniqueName: \"kubernetes.io/projected/bb7d559d-1779-400f-b556-1adbb0c61b60-kube-api-access-h89m4\") pod \"test-operator-controller-manager-74fcfc6d4b-2jdvk\" (UID: \"bb7d559d-1779-400f-b556-1adbb0c61b60\") " pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.953045 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sv526\" (UniqueName: \"kubernetes.io/projected/f3384730-e8d8-4e36-9f3e-8e2dbf3176cb-kube-api-access-sv526\") pod \"placement-operator-controller-manager-78f8948974-wsd5q\" (UID: \"f3384730-e8d8-4e36-9f3e-8e2dbf3176cb\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.953070 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vl6vd\" (UniqueName: \"kubernetes.io/projected/ba6c852e-59d0-4e5a-8967-3502457d62ec-kube-api-access-vl6vd\") pod \"swift-operator-controller-manager-5f8c65bbfc-wc77r\" (UID: \"ba6c852e-59d0-4e5a-8967-3502457d62ec\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.961397 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.967837 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.968749 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.974054 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.974198 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-sh6z4" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.974308 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.985557 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2d8gb\" (UniqueName: \"kubernetes.io/projected/e117033c-b566-4c46-bd57-9e173e88a224-kube-api-access-2d8gb\") pod \"telemetry-operator-controller-manager-76cc84c6bb-6zxdg\" (UID: \"e117033c-b566-4c46-bd57-9e173e88a224\") " pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.985639 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.986484 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sv526\" (UniqueName: \"kubernetes.io/projected/f3384730-e8d8-4e36-9f3e-8e2dbf3176cb-kube-api-access-sv526\") pod \"placement-operator-controller-manager-78f8948974-wsd5q\" (UID: \"f3384730-e8d8-4e36-9f3e-8e2dbf3176cb\") " pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.988058 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.989125 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.994935 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.995717 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs"] Nov 25 21:44:26 crc kubenswrapper[4910]: I1125 21:44:26.998129 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-cc279" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.004399 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h89m4\" (UniqueName: \"kubernetes.io/projected/bb7d559d-1779-400f-b556-1adbb0c61b60-kube-api-access-h89m4\") pod \"test-operator-controller-manager-74fcfc6d4b-2jdvk\" (UID: \"bb7d559d-1779-400f-b556-1adbb0c61b60\") " pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.004565 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vl6vd\" (UniqueName: \"kubernetes.io/projected/ba6c852e-59d0-4e5a-8967-3502457d62ec-kube-api-access-vl6vd\") pod \"swift-operator-controller-manager-5f8c65bbfc-wc77r\" (UID: \"ba6c852e-59d0-4e5a-8967-3502457d62ec\") " pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.008039 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.055136 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp57k\" (UniqueName: \"kubernetes.io/projected/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-kube-api-access-kp57k\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.055195 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56v82\" (UniqueName: \"kubernetes.io/projected/838225a6-f682-4181-aeab-073767c8d49a-kube-api-access-56v82\") pod \"rabbitmq-cluster-operator-manager-668c99d594-dvpfs\" (UID: \"838225a6-f682-4181-aeab-073767c8d49a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.055228 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.055256 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:27 crc 
kubenswrapper[4910]: I1125 21:44:27.055291 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnrr7\" (UniqueName: \"kubernetes.io/projected/0ac69701-2b03-4d80-bb8f-8f46acb193e4-kube-api-access-bnrr7\") pod \"watcher-operator-controller-manager-656dcb59d4-g9p8x\" (UID: \"0ac69701-2b03-4d80-bb8f-8f46acb193e4\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.086580 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnrr7\" (UniqueName: \"kubernetes.io/projected/0ac69701-2b03-4d80-bb8f-8f46acb193e4-kube-api-access-bnrr7\") pod \"watcher-operator-controller-manager-656dcb59d4-g9p8x\" (UID: \"0ac69701-2b03-4d80-bb8f-8f46acb193e4\") " pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.169974 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert\") pod \"infra-operator-controller-manager-57548d458d-9mx4m\" (UID: \"0345d3a7-45fa-4bce-8dcb-4bef18de4b21\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.170089 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp57k\" (UniqueName: \"kubernetes.io/projected/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-kube-api-access-kp57k\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.170151 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56v82\" (UniqueName: \"kubernetes.io/projected/838225a6-f682-4181-aeab-073767c8d49a-kube-api-access-56v82\") pod \"rabbitmq-cluster-operator-manager-668c99d594-dvpfs\" (UID: \"838225a6-f682-4181-aeab-073767c8d49a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.170201 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.170224 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.170439 4910 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.170508 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs podName:9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9 
nodeName:}" failed. No retries permitted until 2025-11-25 21:44:27.670489967 +0000 UTC m=+823.132966279 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs") pod "openstack-operator-controller-manager-758b84fd57-x2sxf" (UID: "9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9") : secret "metrics-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.170871 4910 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.170902 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert podName:0345d3a7-45fa-4bce-8dcb-4bef18de4b21 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:28.170894268 +0000 UTC m=+823.633370590 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert") pod "infra-operator-controller-manager-57548d458d-9mx4m" (UID: "0345d3a7-45fa-4bce-8dcb-4bef18de4b21") : secret "infra-operator-webhook-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.171326 4910 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.171372 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs podName:9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:27.671359541 +0000 UTC m=+823.133835863 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs") pod "openstack-operator-controller-manager-758b84fd57-x2sxf" (UID: "9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9") : secret "webhook-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.171578 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.192215 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.209728 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56v82\" (UniqueName: \"kubernetes.io/projected/838225a6-f682-4181-aeab-073767c8d49a-kube-api-access-56v82\") pod \"rabbitmq-cluster-operator-manager-668c99d594-dvpfs\" (UID: \"838225a6-f682-4181-aeab-073767c8d49a\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.216126 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.237751 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.250001 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp57k\" (UniqueName: \"kubernetes.io/projected/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-kube-api-access-kp57k\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.271765 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.286643 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.306006 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx"] Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.360776 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.376843 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert\") pod \"openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd\" (UID: \"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.377109 4910 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.377276 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert podName:65280dcb-6ac6-443b-88f0-7d3b0dadb4f8 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:28.377224248 +0000 UTC m=+823.839700570 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert") pod "openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" (UID: "65280dcb-6ac6-443b-88f0-7d3b0dadb4f8") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.617444 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5"] Nov 25 21:44:27 crc kubenswrapper[4910]: W1125 21:44:27.626510 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef4b8019_398c_453d_9b78_71c340bf2bdd.slice/crio-034d49dc77f139f787c2e535216e366735a8e8aa425037c5be39226ffc9c5385 WatchSource:0}: Error finding container 034d49dc77f139f787c2e535216e366735a8e8aa425037c5be39226ffc9c5385: Status 404 returned error can't find the container with id 034d49dc77f139f787c2e535216e366735a8e8aa425037c5be39226ffc9c5385 Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.630372 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-776b995c47-chsbs"] Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.639839 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx" event={"ID":"22c915a2-80bf-454b-b0e6-7a5bbafec7a5","Type":"ContainerStarted","Data":"82f902b8adf53632b08c46b4580921f87bf812d1a15841135840b20e66279f54"} Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.682609 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.682669 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.682859 4910 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.682917 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs podName:9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:28.682900561 +0000 UTC m=+824.145376883 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs") pod "openstack-operator-controller-manager-758b84fd57-x2sxf" (UID: "9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9") : secret "metrics-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.683151 4910 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: E1125 21:44:27.683219 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs podName:9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:28.683201639 +0000 UTC m=+824.145677961 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs") pod "openstack-operator-controller-manager-758b84fd57-x2sxf" (UID: "9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9") : secret "webhook-server-cert" not found Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.904783 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq"] Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.912466 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6"] Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.922272 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2"] Nov 25 21:44:27 crc kubenswrapper[4910]: I1125 21:44:27.926598 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4"] Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.099233 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h"] Nov 25 21:44:28 crc kubenswrapper[4910]: W1125 21:44:28.105770 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5c38fd88_4bb1_4d48_a8d4_fe533cbb2d0c.slice/crio-7533d86ed6c30f763d7164cadc302510d5d83d5a31f53f602b76818dd8135dab WatchSource:0}: Error finding container 7533d86ed6c30f763d7164cadc302510d5d83d5a31f53f602b76818dd8135dab: Status 404 returned error can't find the container with id 7533d86ed6c30f763d7164cadc302510d5d83d5a31f53f602b76818dd8135dab Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.107886 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw"] Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.115602 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76"] Nov 25 21:44:28 crc kubenswrapper[4910]: W1125 21:44:28.116704 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeadae0d9_eee7_42f3_aa0e_c42ef3282f24.slice/crio-11f047cf71d1f63e025d1678c746acc2910ffd90273127fb56ec9b7a363118b8 WatchSource:0}: Error finding container 11f047cf71d1f63e025d1678c746acc2910ffd90273127fb56ec9b7a363118b8: Status 404 returned error can't find the container with id 
11f047cf71d1f63e025d1678c746acc2910ffd90273127fb56ec9b7a363118b8 Nov 25 21:44:28 crc kubenswrapper[4910]: W1125 21:44:28.119547 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc27ac874_a062_4342_9559_a14acbff4c9d.slice/crio-ca2a9881d9b0c0152830ac9f55239de0d6832ecbbc80a5dad52383a653299ef9 WatchSource:0}: Error finding container ca2a9881d9b0c0152830ac9f55239de0d6832ecbbc80a5dad52383a653299ef9: Status 404 returned error can't find the container with id ca2a9881d9b0c0152830ac9f55239de0d6832ecbbc80a5dad52383a653299ef9 Nov 25 21:44:28 crc kubenswrapper[4910]: W1125 21:44:28.121830 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podad233905_ebeb_4698_8261_d8a395be75d7.slice/crio-6face14cde649730ddaabe037475f74fcff9e4ef72090428c57be7da4cfcc348 WatchSource:0}: Error finding container 6face14cde649730ddaabe037475f74fcff9e4ef72090428c57be7da4cfcc348: Status 404 returned error can't find the container with id 6face14cde649730ddaabe037475f74fcff9e4ef72090428c57be7da4cfcc348 Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.122512 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j"] Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.125214 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:acfd91a6bf548da786b7c3fe896b4e203ed5e8ce702d70e12f9d8e2c09b9dbb5,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kvnth,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-555bbdd45-7f94x_openstack-operators(ad233905-ebeb-4698-8261-d8a395be75d7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.127579 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kvnth,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-555bbdd45-7f94x_openstack-operators(ad233905-ebeb-4698-8261-d8a395be75d7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.128049 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7"] Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.129348 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" podUID="ad233905-ebeb-4698-8261-d8a395be75d7" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.132620 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc"] Nov 25 21:44:28 crc kubenswrapper[4910]: 
I1125 21:44:28.136867 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x"] Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.190564 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert\") pod \"infra-operator-controller-manager-57548d458d-9mx4m\" (UID: \"0345d3a7-45fa-4bce-8dcb-4bef18de4b21\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.190723 4910 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.190804 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert podName:0345d3a7-45fa-4bce-8dcb-4bef18de4b21 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:30.190781983 +0000 UTC m=+825.653258305 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert") pod "infra-operator-controller-manager-57548d458d-9mx4m" (UID: "0345d3a7-45fa-4bce-8dcb-4bef18de4b21") : secret "infra-operator-webhook-server-cert" not found Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.250886 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg"] Nov 25 21:44:28 crc kubenswrapper[4910]: W1125 21:44:28.258154 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode117033c_b566_4c46_bd57_9e173e88a224.slice/crio-fe4bdf9048d73256dade5b20a563c1e2544373472a97d808fb7f8fe825333f17 WatchSource:0}: Error finding container fe4bdf9048d73256dade5b20a563c1e2544373472a97d808fb7f8fe825333f17: Status 404 returned error can't find the container with id fe4bdf9048d73256dade5b20a563c1e2544373472a97d808fb7f8fe825333f17 Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.261490 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x"] Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.265408 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs"] Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.268938 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r"] Nov 25 21:44:28 crc kubenswrapper[4910]: W1125 21:44:28.272688 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0ac69701_2b03_4d80_bb8f_8f46acb193e4.slice/crio-74034ee89750c6896cde5ee9c0dfdfb341a1e45cd9f179b06f681b8237315fb5 WatchSource:0}: Error finding container 74034ee89750c6896cde5ee9c0dfdfb341a1e45cd9f179b06f681b8237315fb5: Status 404 returned error can't find the container with id 74034ee89750c6896cde5ee9c0dfdfb341a1e45cd9f179b06f681b8237315fb5 Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.276020 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bnrr7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-g9p8x_openstack-operators(0ac69701-2b03-4d80-bb8f-8f46acb193e4): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.280541 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-bnrr7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-656dcb59d4-g9p8x_openstack-operators(0ac69701-2b03-4d80-bb8f-8f46acb193e4): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.281812 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" podUID="0ac69701-2b03-4d80-bb8f-8f46acb193e4" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.285431 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-56v82,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
rabbitmq-cluster-operator-manager-668c99d594-dvpfs_openstack-operators(838225a6-f682-4181-aeab-073767c8d49a): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.286832 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs" podUID="838225a6-f682-4181-aeab-073767c8d49a" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.289611 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q"] Nov 25 21:44:28 crc kubenswrapper[4910]: W1125 21:44:28.294793 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podba6c852e_59d0_4e5a_8967_3502457d62ec.slice/crio-9b1e0a03cb98e86d065d1f886684c858c0e35aeeed53452113e7a3b56e41eec3 WatchSource:0}: Error finding container 9b1e0a03cb98e86d065d1f886684c858c0e35aeeed53452113e7a3b56e41eec3: Status 404 returned error can't find the container with id 9b1e0a03cb98e86d065d1f886684c858c0e35aeeed53452113e7a3b56e41eec3 Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.302153 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vl6vd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-wc77r_openstack-operators(ba6c852e-59d0-4e5a-8967-3502457d62ec): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.309480 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vl6vd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-5f8c65bbfc-wc77r_openstack-operators(ba6c852e-59d0-4e5a-8967-3502457d62ec): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.310675 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" podUID="ba6c852e-59d0-4e5a-8967-3502457d62ec" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.311845 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sv526,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-wsd5q_openstack-operators(f3384730-e8d8-4e36-9f3e-8e2dbf3176cb): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.312140 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk"] Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.313667 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-sv526,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-78f8948974-wsd5q_openstack-operators(f3384730-e8d8-4e36-9f3e-8e2dbf3176cb): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.314791 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" podUID="f3384730-e8d8-4e36-9f3e-8e2dbf3176cb" Nov 25 21:44:28 crc kubenswrapper[4910]: W1125 21:44:28.321839 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbb7d559d_1779_400f_b556_1adbb0c61b60.slice/crio-0177c04df9185413b942cefbd558cf999fe4c2d3f37c5b0f54887d5e443b15d8 WatchSource:0}: Error finding container 0177c04df9185413b942cefbd558cf999fe4c2d3f37c5b0f54887d5e443b15d8: Status 404 returned error can't find the container with id 0177c04df9185413b942cefbd558cf999fe4c2d3f37c5b0f54887d5e443b15d8 Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.323168 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:abb40304ad3aa779060ff474f0f39cff2b8348a3576ac46ea4eb0e0a5caed3cd,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h89m4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-74fcfc6d4b-2jdvk_openstack-operators(bb7d559d-1779-400f-b556-1adbb0c61b60): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.325234 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-h89m4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-74fcfc6d4b-2jdvk_openstack-operators(bb7d559d-1779-400f-b556-1adbb0c61b60): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.326506 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"]" pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" podUID="bb7d559d-1779-400f-b556-1adbb0c61b60" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.395481 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert\") pod \"openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd\" (UID: \"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.395875 4910 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.395930 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert podName:65280dcb-6ac6-443b-88f0-7d3b0dadb4f8 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:30.395914211 +0000 UTC m=+825.858390533 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert") pod "openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" (UID: "65280dcb-6ac6-443b-88f0-7d3b0dadb4f8") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.647184 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6" event={"ID":"6c888013-ea9c-433c-973f-af7c5c22f8c9","Type":"ContainerStarted","Data":"d86ebe535d00434985fec981570744ce963427d51bf60d0b2e485686d83a3a31"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.648430 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" event={"ID":"28574aa2-4470-4432-b7f0-4b3b52b5f8b9","Type":"ContainerStarted","Data":"a9df43091edac8af13ac2d8d36445a4b6da43d7dbbca5c9c01e72346186f97be"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.649433 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76" event={"ID":"c27ac874-a062-4342-9559-a14acbff4c9d","Type":"ContainerStarted","Data":"ca2a9881d9b0c0152830ac9f55239de0d6832ecbbc80a5dad52383a653299ef9"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.650255 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-776b995c47-chsbs" event={"ID":"bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9","Type":"ContainerStarted","Data":"b2f6191a15345870769698f33ab8b51178f244f4a875676d710cf278e86680a4"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.651019 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg" event={"ID":"e117033c-b566-4c46-bd57-9e173e88a224","Type":"ContainerStarted","Data":"fe4bdf9048d73256dade5b20a563c1e2544373472a97d808fb7f8fe825333f17"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.651887 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" event={"ID":"44442e02-9c1f-4a6e-bcdd-237b8260638d","Type":"ContainerStarted","Data":"47b32bf49a61e3916bbdbfcdec98dfa8ec2de24970ad6d4ad8234a8982a3df09"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.652794 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j" event={"ID":"5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c","Type":"ContainerStarted","Data":"7533d86ed6c30f763d7164cadc302510d5d83d5a31f53f602b76818dd8135dab"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.653644 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" event={"ID":"0ac69701-2b03-4d80-bb8f-8f46acb193e4","Type":"ContainerStarted","Data":"74034ee89750c6896cde5ee9c0dfdfb341a1e45cd9f179b06f681b8237315fb5"} Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.655801 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" podUID="0ac69701-2b03-4d80-bb8f-8f46acb193e4" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.656402 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" event={"ID":"f3384730-e8d8-4e36-9f3e-8e2dbf3176cb","Type":"ContainerStarted","Data":"68f2841f5c4ddbcdaa03f41d1ac7f12b61597710cc4b964b839aef8ec624343f"} Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.658396 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" podUID="f3384730-e8d8-4e36-9f3e-8e2dbf3176cb" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.659194 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2" event={"ID":"cd608fcb-14bd-424e-9f6e-c0eea37397ea","Type":"ContainerStarted","Data":"f85af2119e7ea6fa804001a7ccb09b8a7fcd40ffdea634b7a597735436d67c07"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.660869 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs" event={"ID":"838225a6-f682-4181-aeab-073767c8d49a","Type":"ContainerStarted","Data":"884454e5883d910e6a7fb8a6fb465834dc9c5f2992e96ca7546dbe0826d06cea"} Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.662315 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs" podUID="838225a6-f682-4181-aeab-073767c8d49a" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.663507 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc" event={"ID":"eadae0d9-eee7-42f3-aa0e-c42ef3282f24","Type":"ContainerStarted","Data":"11f047cf71d1f63e025d1678c746acc2910ffd90273127fb56ec9b7a363118b8"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.664680 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5" event={"ID":"ef4b8019-398c-453d-9b78-71c340bf2bdd","Type":"ContainerStarted","Data":"034d49dc77f139f787c2e535216e366735a8e8aa425037c5be39226ffc9c5385"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.668056 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" event={"ID":"bb7d559d-1779-400f-b556-1adbb0c61b60","Type":"ContainerStarted","Data":"0177c04df9185413b942cefbd558cf999fe4c2d3f37c5b0f54887d5e443b15d8"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.671194 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h" event={"ID":"2c94f244-a036-47af-8ba4-5dfe41ad5e66","Type":"ContainerStarted","Data":"f883ec48c2002215446c72e0abfeb5a01bd81b8de9b4956e6f8536598c797e8b"} Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.671394 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:abb40304ad3aa779060ff474f0f39cff2b8348a3576ac46ea4eb0e0a5caed3cd\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" podUID="bb7d559d-1779-400f-b556-1adbb0c61b60" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.673313 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" event={"ID":"ad233905-ebeb-4698-8261-d8a395be75d7","Type":"ContainerStarted","Data":"6face14cde649730ddaabe037475f74fcff9e4ef72090428c57be7da4cfcc348"} Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.675587 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:acfd91a6bf548da786b7c3fe896b4e203ed5e8ce702d70e12f9d8e2c09b9dbb5\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" podUID="ad233905-ebeb-4698-8261-d8a395be75d7" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.676229 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" event={"ID":"2cce005e-33cd-4b63-8798-b0b7eb53ba73","Type":"ContainerStarted","Data":"3776ebc7b76344fd563d2e4a659865429818ed5e7fe3b7318e5b564b4a648b53"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.677518 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" 
event={"ID":"5c6fa310-d85a-4ac3-be15-478635a8c221","Type":"ContainerStarted","Data":"1081d83d66697866b225e94cd4b0df10ca12db68957e15aa66704b8a3a28868f"} Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.678650 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" event={"ID":"ba6c852e-59d0-4e5a-8967-3502457d62ec","Type":"ContainerStarted","Data":"9b1e0a03cb98e86d065d1f886684c858c0e35aeeed53452113e7a3b56e41eec3"} Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.681673 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" podUID="ba6c852e-59d0-4e5a-8967-3502457d62ec" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.699388 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:28 crc kubenswrapper[4910]: I1125 21:44:28.699457 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.700094 4910 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.700144 4910 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.700200 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs podName:9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:30.700175915 +0000 UTC m=+826.162652427 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs") pod "openstack-operator-controller-manager-758b84fd57-x2sxf" (UID: "9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9") : secret "webhook-server-cert" not found Nov 25 21:44:28 crc kubenswrapper[4910]: E1125 21:44:28.700226 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs podName:9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:30.700216566 +0000 UTC m=+826.162693128 (durationBeforeRetry 2s). 
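
[Editor's note: the "ErrImagePull: pull QPS exceeded" failures scattered above come from the kubelet's own image-pull rate limiter, not from the registry. With roughly eighteen operator deployments scheduling on this node at once, the limiter rejects whatever exceeds its token bucket and those containers drop into ImagePullBackOff. The limiter is configured by the KubeletConfiguration fields registryPullQPS and registryBurst; 5 QPS with a burst of 10 are the upstream defaults, assumed here rather than read from this node. A toy token bucket showing the arithmetic:]

    package main

    import (
    	"fmt"
    	"time"
    )

    // tokenBucket is a toy stand-in for the limiter the kubelet places in
    // front of image pulls (registryPullQPS / registryBurst; the 5/10 values
    // below are upstream defaults, an assumption for this cluster).
    type tokenBucket struct {
    	tokens, capacity, qps float64
    	last                  time.Time
    }

    func (b *tokenBucket) tryAccept(now time.Time) bool {
    	b.tokens += now.Sub(b.last).Seconds() * b.qps // refill since last call
    	if b.tokens > b.capacity {
    		b.tokens = b.capacity
    	}
    	b.last = now
    	if b.tokens >= 1 {
    		b.tokens--
    		return true
    	}
    	return false
    }

    func main() {
    	b := &tokenBucket{tokens: 10, capacity: 10, qps: 5, last: time.Now()}
    	// ~18 operator pods, two containers each, all pulling at the same instant:
    	now, denied := time.Now(), 0
    	for i := 0; i < 36; i++ {
    		if !b.tryAccept(now) {
    			denied++ // the kubelet surfaces this as ErrImagePull: "pull QPS exceeded"
    		}
    	}
    	fmt.Printf("%d of 36 simultaneous pulls rejected by the limiter\n", denied)
    }

[With the assumed defaults, only the first 10 pulls fit the burst and the remaining 26 are rejected, which matches the burst of QPS errors in this window.]
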
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs") pod "openstack-operator-controller-manager-758b84fd57-x2sxf" (UID: "9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9") : secret "metrics-server-cert" not found Nov 25 21:44:29 crc kubenswrapper[4910]: E1125 21:44:29.689150 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:6bed55b172b9ee8ccc3952cbfc543d8bd44e2690f6db94348a754152fd78f4cf\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" podUID="0ac69701-2b03-4d80-bb8f-8f46acb193e4" Nov 25 21:44:29 crc kubenswrapper[4910]: E1125 21:44:29.689562 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:abb40304ad3aa779060ff474f0f39cff2b8348a3576ac46ea4eb0e0a5caed3cd\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" podUID="bb7d559d-1779-400f-b556-1adbb0c61b60" Nov 25 21:44:29 crc kubenswrapper[4910]: E1125 21:44:29.689731 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:2a3d21728a8bfb4e64617e63e61e2d1cb70a383ea3e8f846e0c3c3c02d2b0a9d\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" podUID="ba6c852e-59d0-4e5a-8967-3502457d62ec" Nov 25 21:44:29 crc kubenswrapper[4910]: E1125 21:44:29.690093 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:acfd91a6bf548da786b7c3fe896b4e203ed5e8ce702d70e12f9d8e2c09b9dbb5\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" podUID="ad233905-ebeb-4698-8261-d8a395be75d7" Nov 25 21:44:29 crc kubenswrapper[4910]: E1125 21:44:29.690497 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs" podUID="838225a6-f682-4181-aeab-073767c8d49a" Nov 25 21:44:29 crc kubenswrapper[4910]: E1125 21:44:29.691945 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/placement-operator@sha256:d29650b006da97eb9178fcc58f2eb9fead8c2b414fac18f86a3c3a1507488c4f\\\"\", failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"]" pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" podUID="f3384730-e8d8-4e36-9f3e-8e2dbf3176cb" Nov 25 21:44:30 crc kubenswrapper[4910]: I1125 21:44:30.223038 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert\") pod \"infra-operator-controller-manager-57548d458d-9mx4m\" (UID: \"0345d3a7-45fa-4bce-8dcb-4bef18de4b21\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:30 crc kubenswrapper[4910]: E1125 21:44:30.223256 4910 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 21:44:30 crc kubenswrapper[4910]: E1125 21:44:30.223320 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert podName:0345d3a7-45fa-4bce-8dcb-4bef18de4b21 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:34.223301747 +0000 UTC m=+829.685778069 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert") pod "infra-operator-controller-manager-57548d458d-9mx4m" (UID: "0345d3a7-45fa-4bce-8dcb-4bef18de4b21") : secret "infra-operator-webhook-server-cert" not found Nov 25 21:44:30 crc kubenswrapper[4910]: I1125 21:44:30.426365 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert\") pod \"openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd\" (UID: \"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:30 crc kubenswrapper[4910]: E1125 21:44:30.426612 4910 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 21:44:30 crc kubenswrapper[4910]: E1125 21:44:30.426829 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert podName:65280dcb-6ac6-443b-88f0-7d3b0dadb4f8 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:34.426798441 +0000 UTC m=+829.889274763 (durationBeforeRetry 4s). 
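
[Editor's note: after a pull fails, the "Back-off pulling image ..." / ImagePullBackOff messages above take over: the kubelet retries each container's image pull on a doubling backoff rather than immediately. The commonly cited kubelet defaults are a 10s initial delay capped at 5m (MaxContainerBackOff); treat both numbers as assumptions for this build. A sketch of that schedule:]

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	delay := 10 * time.Second   // assumed initial image-pull backoff
    	maxDelay := 5 * time.Minute // assumed ceiling (MaxContainerBackOff)
    	for attempt := 1; attempt <= 8; attempt++ {
    		fmt.Printf("pull attempt %d: next retry in %v\n", attempt, delay)
    		delay *= 2
    		if delay > maxDelay {
    			delay = maxDelay
    		}
    	}
    }
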
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert") pod "openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" (UID: "65280dcb-6ac6-443b-88f0-7d3b0dadb4f8") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 21:44:30 crc kubenswrapper[4910]: I1125 21:44:30.731405 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:30 crc kubenswrapper[4910]: I1125 21:44:30.731452 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:30 crc kubenswrapper[4910]: E1125 21:44:30.731623 4910 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 21:44:30 crc kubenswrapper[4910]: E1125 21:44:30.731666 4910 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 21:44:30 crc kubenswrapper[4910]: E1125 21:44:30.731689 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs podName:9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:34.731670902 +0000 UTC m=+830.194147224 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs") pod "openstack-operator-controller-manager-758b84fd57-x2sxf" (UID: "9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9") : secret "metrics-server-cert" not found Nov 25 21:44:30 crc kubenswrapper[4910]: E1125 21:44:30.731765 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs podName:9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:34.731746264 +0000 UTC m=+830.194222586 (durationBeforeRetry 4s). 
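Annotation (not kubelet output): the MountVolume.SetUp failures above, and the webhook-certs retry notice that continues below, all stem from Secrets ("infra-operator-webhook-server-cert", "openstack-baremetal-operator-webhook-server-cert", "metrics-server-cert", "webhook-server-cert") that do not yet exist in the openstack-operators namespace; kubelet keeps retrying the mount until whatever issues those certs has created them. A minimal client-go sketch that waits for one of these Secrets follows — the secret and namespace names are taken from the log, while the polling helper itself is illustrative and not how kubelet resolves this internally.

    // Sketch: poll until a Secret referenced by a pending volume mount exists.
    package main

    import (
        "context"
        "fmt"
        "time"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        cs, err := kubernetes.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        for {
            _, err := cs.CoreV1().Secrets("openstack-operators").
                Get(context.TODO(), "infra-operator-webhook-server-cert", metav1.GetOptions{})
            if err == nil {
                fmt.Println("secret exists; the next mount retry should succeed")
                return
            }
            if !apierrors.IsNotFound(err) {
                panic(err)
            }
            time.Sleep(4 * time.Second) // mirrors the 4s retry spacing logged above
        }
    }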
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs") pod "openstack-operator-controller-manager-758b84fd57-x2sxf" (UID: "9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9") : secret "webhook-server-cert" not found Nov 25 21:44:34 crc kubenswrapper[4910]: I1125 21:44:34.224965 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert\") pod \"infra-operator-controller-manager-57548d458d-9mx4m\" (UID: \"0345d3a7-45fa-4bce-8dcb-4bef18de4b21\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:34 crc kubenswrapper[4910]: E1125 21:44:34.225228 4910 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Nov 25 21:44:34 crc kubenswrapper[4910]: E1125 21:44:34.225585 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert podName:0345d3a7-45fa-4bce-8dcb-4bef18de4b21 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:42.225568068 +0000 UTC m=+837.688044390 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert") pod "infra-operator-controller-manager-57548d458d-9mx4m" (UID: "0345d3a7-45fa-4bce-8dcb-4bef18de4b21") : secret "infra-operator-webhook-server-cert" not found Nov 25 21:44:34 crc kubenswrapper[4910]: I1125 21:44:34.428593 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert\") pod \"openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd\" (UID: \"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:34 crc kubenswrapper[4910]: E1125 21:44:34.428860 4910 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 21:44:34 crc kubenswrapper[4910]: E1125 21:44:34.428972 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert podName:65280dcb-6ac6-443b-88f0-7d3b0dadb4f8 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:42.428946089 +0000 UTC m=+837.891422411 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert") pod "openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" (UID: "65280dcb-6ac6-443b-88f0-7d3b0dadb4f8") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 25 21:44:34 crc kubenswrapper[4910]: I1125 21:44:34.732613 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:34 crc kubenswrapper[4910]: I1125 21:44:34.732666 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:34 crc kubenswrapper[4910]: E1125 21:44:34.732808 4910 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Nov 25 21:44:34 crc kubenswrapper[4910]: E1125 21:44:34.732863 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs podName:9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:42.732847314 +0000 UTC m=+838.195323636 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs") pod "openstack-operator-controller-manager-758b84fd57-x2sxf" (UID: "9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9") : secret "metrics-server-cert" not found Nov 25 21:44:34 crc kubenswrapper[4910]: E1125 21:44:34.733179 4910 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 25 21:44:34 crc kubenswrapper[4910]: E1125 21:44:34.733219 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs podName:9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9 nodeName:}" failed. No retries permitted until 2025-11-25 21:44:42.733209413 +0000 UTC m=+838.195685735 (durationBeforeRetry 8s). 
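Annotation (not kubelet output): note that durationBeforeRetry for the same volumes grew from 4s (the 21:44:30 records) to 8s (the 21:44:34 records above and the webhook-certs entry that continues below) — kubelet's nested pending operations back off exponentially between failed mount attempts. A doubling-with-cap sketch under assumed values (the 2s seed and 2m cap are illustrative, not quoted from kubelet source; only the 4s and 8s steps are visible in this log):

    // Sketch: exponential backoff as observed in the retry spacing above.
    package main

    import (
        "fmt"
        "time"
    )

    func next(d, max time.Duration) time.Duration {
        if d *= 2; d > max {
            return max
        }
        return d
    }

    func main() {
        d := 2 * time.Second // assumed seed; the log shows the 4s and 8s steps
        for i := 1; i <= 6; i++ {
            d = next(d, 2*time.Minute)
            fmt.Printf("retry %d after %v\n", i, d)
        }
    }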
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs") pod "openstack-operator-controller-manager-758b84fd57-x2sxf" (UID: "9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9") : secret "webhook-server-cert" not found Nov 25 21:44:40 crc kubenswrapper[4910]: E1125 21:44:40.086871 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vnnr2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5f64f6f8bb-vwgmq_openstack-operators(28574aa2-4470-4432-b7f0-4b3b52b5f8b9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:40 crc kubenswrapper[4910]: E1125 21:44:40.088927 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" podUID="28574aa2-4470-4432-b7f0-4b3b52b5f8b9" Nov 25 21:44:40 crc kubenswrapper[4910]: E1125 21:44:40.093899 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rvf95,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod mariadb-operator-controller-manager-56bbcc9d85-tkpxw_openstack-operators(44442e02-9c1f-4a6e-bcdd-237b8260638d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:40 crc kubenswrapper[4910]: E1125 21:44:40.097741 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true --v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-qc9dc,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ironic-operator-controller-manager-6c548fd776-q9hg4_openstack-operators(2cce005e-33cd-4b63-8798-b0b7eb53ba73): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:40 crc kubenswrapper[4910]: E1125 21:44:40.098425 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" podUID="44442e02-9c1f-4a6e-bcdd-237b8260638d" Nov 25 21:44:40 crc kubenswrapper[4910]: E1125 21:44:40.099663 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:kube-rbac-proxy,Image:quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0,Command:[],Args:[--secure-listen-address=0.0.0.0:8443 --upstream=http://127.0.0.1:8080/ --logtostderr=true 
--v=0],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{134217728 0} {} BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wsblx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod manila-operator-controller-manager-6546668bfd-z9dd7_openstack-operators(5c6fa310-d85a-4ac3-be15-478635a8c221): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 25 21:44:40 crc kubenswrapper[4910]: E1125 21:44:40.099785 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" podUID="2cce005e-33cd-4b63-8798-b0b7eb53ba73" Nov 25 21:44:40 crc kubenswrapper[4910]: E1125 21:44:40.101627 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" podUID="5c6fa310-d85a-4ac3-be15-478635a8c221" Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.801995 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" event={"ID":"44442e02-9c1f-4a6e-bcdd-237b8260638d","Type":"ContainerStarted","Data":"fe5bf3e287c56d5c8f2b602de955af3eb62163230af407fc04fbe4061dfbbc70"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.802855 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" Nov 25 21:44:40 crc kubenswrapper[4910]: E1125 21:44:40.813486 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" podUID="44442e02-9c1f-4a6e-bcdd-237b8260638d" Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.829477 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76" event={"ID":"c27ac874-a062-4342-9559-a14acbff4c9d","Type":"ContainerStarted","Data":"5358671bdd3f7a64bf4473b2bc4748411f910e22a395b464d7c9ae93e6f51d70"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.849494 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2" event={"ID":"cd608fcb-14bd-424e-9f6e-c0eea37397ea","Type":"ContainerStarted","Data":"60a2a247af3f0dcc78fc8c5c78cb1daffe7ab5a05750c4a2762dfd2d8385a8ec"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.870860 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j" event={"ID":"5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c","Type":"ContainerStarted","Data":"3157baacbe891ad8e78fe8a52ed3fe3cf88734357095925f751d5c8f885b15d1"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.881943 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg" event={"ID":"e117033c-b566-4c46-bd57-9e173e88a224","Type":"ContainerStarted","Data":"a9f7608f3155a9a8fdccea1051bb035f989e721d5181199fd732e43c0e5c4027"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.892737 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6" event={"ID":"6c888013-ea9c-433c-973f-af7c5c22f8c9","Type":"ContainerStarted","Data":"7ab8b5f17262b210fe127782d03ea7366c344812d4122373eeeca5e7db49651c"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.894157 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" event={"ID":"2cce005e-33cd-4b63-8798-b0b7eb53ba73","Type":"ContainerStarted","Data":"7dce916e17ad50b25c994a4acff883adad47d0878132b934f2ec271fdbd8cfc0"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.894304 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" Nov 25 21:44:40 crc kubenswrapper[4910]: E1125 21:44:40.897626 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" podUID="2cce005e-33cd-4b63-8798-b0b7eb53ba73" Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.924504 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-776b995c47-chsbs" event={"ID":"bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9","Type":"ContainerStarted","Data":"44c3cd790a91e0f54389c94683abb8e9f2999b003c87ab2eb4af3921b29e53ae"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.934433 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc" event={"ID":"eadae0d9-eee7-42f3-aa0e-c42ef3282f24","Type":"ContainerStarted","Data":"e59f1d4f0f80de0e40238baad9307a53ef9e41c562b0c544be72d309da40077b"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.947587 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h" event={"ID":"2c94f244-a036-47af-8ba4-5dfe41ad5e66","Type":"ContainerStarted","Data":"1f0837ecbb206a4b3033a6b210e0435dbbf24a71033580e4dbd2724e573aa140"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.973547 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" 
event={"ID":"28574aa2-4470-4432-b7f0-4b3b52b5f8b9","Type":"ContainerStarted","Data":"48f69ceb3ab994cbb8c993f8badd4236845470a29ca43cece3d43f5e4a7d659f"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.974003 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" Nov 25 21:44:40 crc kubenswrapper[4910]: E1125 21:44:40.976512 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" podUID="28574aa2-4470-4432-b7f0-4b3b52b5f8b9" Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.991779 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" event={"ID":"5c6fa310-d85a-4ac3-be15-478635a8c221","Type":"ContainerStarted","Data":"c678a40c1d48dc0b03397638c99373875f37c65f1961fe4db0368cc1300e7077"} Nov 25 21:44:40 crc kubenswrapper[4910]: I1125 21:44:40.991857 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" Nov 25 21:44:41 crc kubenswrapper[4910]: E1125 21:44:41.000683 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" podUID="5c6fa310-d85a-4ac3-be15-478635a8c221" Nov 25 21:44:41 crc kubenswrapper[4910]: I1125 21:44:41.001212 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx" event={"ID":"22c915a2-80bf-454b-b0e6-7a5bbafec7a5","Type":"ContainerStarted","Data":"0924a0ac37b898ec178d0fb037ba9f44c6aee4e789bb3561811166551aa74c37"} Nov 25 21:44:41 crc kubenswrapper[4910]: I1125 21:44:41.020872 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5" event={"ID":"ef4b8019-398c-453d-9b78-71c340bf2bdd","Type":"ContainerStarted","Data":"052d289c49eb2e5d3f7cc03ea86683aae5e5fab3c346239303e709a9cc450e57"} Nov 25 21:44:42 crc kubenswrapper[4910]: E1125 21:44:42.029730 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" podUID="2cce005e-33cd-4b63-8798-b0b7eb53ba73" Nov 25 21:44:42 crc kubenswrapper[4910]: E1125 21:44:42.029769 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" podUID="44442e02-9c1f-4a6e-bcdd-237b8260638d" Nov 25 21:44:42 crc kubenswrapper[4910]: E1125 21:44:42.029796 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" podUID="5c6fa310-d85a-4ac3-be15-478635a8c221" Nov 25 21:44:42 crc kubenswrapper[4910]: E1125 21:44:42.030523 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" podUID="28574aa2-4470-4432-b7f0-4b3b52b5f8b9" Nov 25 21:44:42 crc kubenswrapper[4910]: I1125 21:44:42.319527 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert\") pod \"infra-operator-controller-manager-57548d458d-9mx4m\" (UID: \"0345d3a7-45fa-4bce-8dcb-4bef18de4b21\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:42 crc kubenswrapper[4910]: I1125 21:44:42.325208 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/0345d3a7-45fa-4bce-8dcb-4bef18de4b21-cert\") pod \"infra-operator-controller-manager-57548d458d-9mx4m\" (UID: \"0345d3a7-45fa-4bce-8dcb-4bef18de4b21\") " pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:42 crc kubenswrapper[4910]: I1125 21:44:42.364156 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:42 crc kubenswrapper[4910]: I1125 21:44:42.522394 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert\") pod \"openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd\" (UID: \"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:42 crc kubenswrapper[4910]: I1125 21:44:42.542771 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/65280dcb-6ac6-443b-88f0-7d3b0dadb4f8-cert\") pod \"openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd\" (UID: \"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:42 crc kubenswrapper[4910]: I1125 21:44:42.752181 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:42 crc kubenswrapper[4910]: I1125 21:44:42.830612 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:42 crc kubenswrapper[4910]: I1125 21:44:42.830655 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:42 crc kubenswrapper[4910]: I1125 21:44:42.834370 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-metrics-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:42 crc kubenswrapper[4910]: I1125 21:44:42.834457 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9-webhook-certs\") pod \"openstack-operator-controller-manager-758b84fd57-x2sxf\" (UID: \"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9\") " pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:42 crc kubenswrapper[4910]: I1125 21:44:42.937418 4910 util.go:30] "No sandbox for pod can be found. 
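Annotation (not kubelet output): the earlier "ErrImagePull: pull QPS exceeded" records (21:44:40, for the heat, mariadb, ironic, and manila operator pods) are not registry failures — kubelet's own client-side image-pull rate limiter rejected the pulls because many operator pods requested images at once. That limiter is a token bucket governed by the registryPullQPS and registryBurst kubelet settings; the sketch below uses the commonly cited defaults of 5 QPS / burst 10 (assumed here, not read from this node's config), after which the log resumes with the pod sandbox being started.

    // Sketch: token-bucket behaviour behind "pull QPS exceeded".
    package main

    import (
        "fmt"

        "golang.org/x/time/rate"
    )

    func main() {
        limiter := rate.NewLimiter(rate.Limit(5), 10) // 5 pulls/s, burst of 10
        for i := 1; i <= 15; i++ {
            if limiter.Allow() {
                fmt.Printf("pull %d: allowed\n", i)
            } else {
                fmt.Printf("pull %d: pull QPS exceeded\n", i) // surfaced as ErrImagePull
            }
        }
    }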
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:43 crc kubenswrapper[4910]: I1125 21:44:43.956151 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf"] Nov 25 21:44:43 crc kubenswrapper[4910]: I1125 21:44:43.973746 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd"] Nov 25 21:44:44 crc kubenswrapper[4910]: I1125 21:44:44.026698 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m"] Nov 25 21:44:45 crc kubenswrapper[4910]: I1125 21:44:45.056855 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" event={"ID":"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9","Type":"ContainerStarted","Data":"aa263c5899642f0baf8f2cfeebfeeb81e6a98d9a16ac78df87b9b384156b70de"} Nov 25 21:44:45 crc kubenswrapper[4910]: I1125 21:44:45.059576 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" event={"ID":"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8","Type":"ContainerStarted","Data":"2f748c2bcd6ad31a1a320215ed46acf86bb8950ca90bcbeea82cf2ab43068450"} Nov 25 21:44:45 crc kubenswrapper[4910]: I1125 21:44:45.061305 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" event={"ID":"0345d3a7-45fa-4bce-8dcb-4bef18de4b21","Type":"ContainerStarted","Data":"001ea7ae3149962ef5bc9273d378180922082969a09eab8cd3e2efe22d877b2a"} Nov 25 21:44:46 crc kubenswrapper[4910]: I1125 21:44:46.667452 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" Nov 25 21:44:46 crc kubenswrapper[4910]: E1125 21:44:46.669817 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" podUID="28574aa2-4470-4432-b7f0-4b3b52b5f8b9" Nov 25 21:44:46 crc kubenswrapper[4910]: I1125 21:44:46.808853 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" Nov 25 21:44:46 crc kubenswrapper[4910]: E1125 21:44:46.813070 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" podUID="2cce005e-33cd-4b63-8798-b0b7eb53ba73" Nov 25 21:44:46 crc kubenswrapper[4910]: I1125 21:44:46.878965 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" Nov 25 21:44:46 crc kubenswrapper[4910]: E1125 21:44:46.881029 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" 
pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" podUID="5c6fa310-d85a-4ac3-be15-478635a8c221" Nov 25 21:44:46 crc kubenswrapper[4910]: I1125 21:44:46.935889 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" Nov 25 21:44:46 crc kubenswrapper[4910]: E1125 21:44:46.938797 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-rbac-proxy\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/kube-rbac-proxy:v0.16.0\\\"\"" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" podUID="44442e02-9c1f-4a6e-bcdd-237b8260638d" Nov 25 21:44:52 crc kubenswrapper[4910]: I1125 21:44:52.120265 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" event={"ID":"f3384730-e8d8-4e36-9f3e-8e2dbf3176cb","Type":"ContainerStarted","Data":"88945cec7c5a83bb1d05c067409c889d54ea8ed8c1becb550876e4fb0bbef92a"} Nov 25 21:44:52 crc kubenswrapper[4910]: I1125 21:44:52.125146 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs" event={"ID":"838225a6-f682-4181-aeab-073767c8d49a","Type":"ContainerStarted","Data":"4f0eba1514398425a440de4b8aa898ff3f77470b45967a1f4d49278f0781ec4c"} Nov 25 21:44:52 crc kubenswrapper[4910]: I1125 21:44:52.130007 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" event={"ID":"9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9","Type":"ContainerStarted","Data":"8dc7cff4f906e32139375525cc466700124dd3457c20ce08798f617c4a886b39"} Nov 25 21:44:52 crc kubenswrapper[4910]: I1125 21:44:52.130175 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:44:52 crc kubenswrapper[4910]: I1125 21:44:52.146080 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-dvpfs" podStartSLOduration=5.082727123 podStartE2EDuration="26.146060712s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.285200533 +0000 UTC m=+823.747676855" lastFinishedPulling="2025-11-25 21:44:49.348534122 +0000 UTC m=+844.811010444" observedRunningTime="2025-11-25 21:44:52.142735513 +0000 UTC m=+847.605211835" watchObservedRunningTime="2025-11-25 21:44:52.146060712 +0000 UTC m=+847.608537034" Nov 25 21:44:52 crc kubenswrapper[4910]: I1125 21:44:52.190574 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" podStartSLOduration=26.190549902 podStartE2EDuration="26.190549902s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:44:52.183951247 +0000 UTC m=+847.646427569" watchObservedRunningTime="2025-11-25 21:44:52.190549902 +0000 UTC m=+847.653026224" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.139850 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" 
event={"ID":"0345d3a7-45fa-4bce-8dcb-4bef18de4b21","Type":"ContainerStarted","Data":"a19e5e70c8f1076aae29b866f1db5967047dfc6c3ad43543320129e5c573d87a"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.139939 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" event={"ID":"0345d3a7-45fa-4bce-8dcb-4bef18de4b21","Type":"ContainerStarted","Data":"782caae2c28c08b368870d31dbab014f7fd938b4e292b430c5f4ff4deff7d3bf"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.139959 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.143061 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j" event={"ID":"5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c","Type":"ContainerStarted","Data":"8c67d0a70ddb095dfd63b275d54a1f51e01872dbe12956947b60c3ea667ad82b"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.143339 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.145136 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-776b995c47-chsbs" event={"ID":"bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9","Type":"ContainerStarted","Data":"abe0fb1c3e67dc130c7f6b8b3dbc73aed0b909e400c6f3bc96efbf0cae139975"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.145281 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-776b995c47-chsbs" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.147314 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-776b995c47-chsbs" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.148309 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx" event={"ID":"22c915a2-80bf-454b-b0e6-7a5bbafec7a5","Type":"ContainerStarted","Data":"3788435580d87fd5aee7c9abe27525123d1c7e753d9bac302d10d92d5a03af44"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.148643 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.151054 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5" event={"ID":"ef4b8019-398c-453d-9b78-71c340bf2bdd","Type":"ContainerStarted","Data":"dd279f5c34847bd4ca883df66a0b28c4d11bcd06a20d76ca5325356ffaf726b6"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.151201 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.151411 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.152498 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" event={"ID":"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8","Type":"ContainerStarted","Data":"5570f2e7933b735f384fcff5bfce5208d62c023c9d81933f6cd9c1911dbcb645"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.153372 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.153824 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" event={"ID":"bb7d559d-1779-400f-b556-1adbb0c61b60","Type":"ContainerStarted","Data":"669977bf25603ca679143d25b6abdfefb8d4192fb2fabcaec3a96c8e70ff8dc5"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.155588 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76" event={"ID":"c27ac874-a062-4342-9559-a14acbff4c9d","Type":"ContainerStarted","Data":"35a05bc199fd5ddab1dd49c42fc38b270a6d279cc76a6a5eeffae178c39ac3f0"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.155867 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.157466 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.161925 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.163652 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2" event={"ID":"cd608fcb-14bd-424e-9f6e-c0eea37397ea","Type":"ContainerStarted","Data":"e76e673f825722605fc7acf631972332ec49b312ca2e9f1d98e17c06f3fbf6f9"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.165387 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.169170 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg" event={"ID":"e117033c-b566-4c46-bd57-9e173e88a224","Type":"ContainerStarted","Data":"50ffb8b350dc4c7bdba8e45f13cd0af87c4fc79b18e57df74814a017e38dae3e"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.170176 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.170358 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.171471 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" event={"ID":"ad233905-ebeb-4698-8261-d8a395be75d7","Type":"ContainerStarted","Data":"67715f57ce78435fb7a7ca453bcc7ccc65d88c859acd262700d2b94931a886d2"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.172581 4910 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.172823 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" event={"ID":"ba6c852e-59d0-4e5a-8967-3502457d62ec","Type":"ContainerStarted","Data":"7f1bf9820edf4e5b7d5fd7fbe3c80a0e7c6f89d54eec4c011b216a445932d406"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.174030 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc" event={"ID":"eadae0d9-eee7-42f3-aa0e-c42ef3282f24","Type":"ContainerStarted","Data":"9a28de78cda0a4d87d255122229b22200d2f0da9cc71aa7507eb4840e8f8a42b"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.174767 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.177099 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.177775 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6" event={"ID":"6c888013-ea9c-433c-973f-af7c5c22f8c9","Type":"ContainerStarted","Data":"610849473c0a8c613e3a724637f93b551663112a121add6e4ce291e3ddca0de1"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.178769 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.180659 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" podStartSLOduration=20.276516644 podStartE2EDuration="27.180646811s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:44.66696547 +0000 UTC m=+840.129441792" lastFinishedPulling="2025-11-25 21:44:51.571095637 +0000 UTC m=+847.033571959" observedRunningTime="2025-11-25 21:44:53.175125204 +0000 UTC m=+848.637601526" watchObservedRunningTime="2025-11-25 21:44:53.180646811 +0000 UTC m=+848.643123133" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.188882 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.195156 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" event={"ID":"f3384730-e8d8-4e36-9f3e-8e2dbf3176cb","Type":"ContainerStarted","Data":"77e8a71d6895f5544ff1846da01390a56b017c8458358a8600b598a57769de9a"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.196027 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.197516 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" 
event={"ID":"0ac69701-2b03-4d80-bb8f-8f46acb193e4","Type":"ContainerStarted","Data":"989da16bac382a8a416f500a443555fb0738983874c310b62b0f3d6484225f63"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.202071 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h" event={"ID":"2c94f244-a036-47af-8ba4-5dfe41ad5e66","Type":"ContainerStarted","Data":"940ebba88e9ba09a17246a4a8d0e8ed2a2a951eb17514beecb751035e011769d"} Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.202143 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.220593 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.239436 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5fdfd5b6b5-t4qfc" podStartSLOduration=3.8052689920000002 podStartE2EDuration="27.23941396s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.119684571 +0000 UTC m=+823.582160893" lastFinishedPulling="2025-11-25 21:44:51.553829529 +0000 UTC m=+847.016305861" observedRunningTime="2025-11-25 21:44:53.208282764 +0000 UTC m=+848.670759086" watchObservedRunningTime="2025-11-25 21:44:53.23941396 +0000 UTC m=+848.701890282" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.257725 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-6c4d7c9757-gpg9j" podStartSLOduration=3.8120262670000002 podStartE2EDuration="27.257697235s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.111425888 +0000 UTC m=+823.573902210" lastFinishedPulling="2025-11-25 21:44:51.557096846 +0000 UTC m=+847.019573178" observedRunningTime="2025-11-25 21:44:53.243666123 +0000 UTC m=+848.706142435" watchObservedRunningTime="2025-11-25 21:44:53.257697235 +0000 UTC m=+848.720173557" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.409551 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-776b995c47-chsbs" podStartSLOduration=3.552827824 podStartE2EDuration="27.409526303s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:27.685119161 +0000 UTC m=+823.147595483" lastFinishedPulling="2025-11-25 21:44:51.54181764 +0000 UTC m=+847.004293962" observedRunningTime="2025-11-25 21:44:53.307175287 +0000 UTC m=+848.769651609" watchObservedRunningTime="2025-11-25 21:44:53.409526303 +0000 UTC m=+848.872002625" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.467707 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7d9dfd778-x58hx" podStartSLOduration=3.307269941 podStartE2EDuration="27.467662785s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:27.424441448 +0000 UTC m=+822.886917760" lastFinishedPulling="2025-11-25 21:44:51.584834282 +0000 UTC m=+847.047310604" observedRunningTime="2025-11-25 21:44:53.340377048 +0000 UTC m=+848.802853370" watchObservedRunningTime="2025-11-25 21:44:53.467662785 +0000 UTC 
m=+848.930139117" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.483040 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-78b4bc895b-gbgd2" podStartSLOduration=3.878852966 podStartE2EDuration="27.483012832s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:27.937649774 +0000 UTC m=+823.400126096" lastFinishedPulling="2025-11-25 21:44:51.54180964 +0000 UTC m=+847.004285962" observedRunningTime="2025-11-25 21:44:53.39284423 +0000 UTC m=+848.855320552" watchObservedRunningTime="2025-11-25 21:44:53.483012832 +0000 UTC m=+848.945489154" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.488154 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-859b6ccc6-gvsd5" podStartSLOduration=4.133074832 podStartE2EDuration="27.488132228s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:27.645883895 +0000 UTC m=+823.108360217" lastFinishedPulling="2025-11-25 21:44:51.000941281 +0000 UTC m=+846.463417613" observedRunningTime="2025-11-25 21:44:53.439124258 +0000 UTC m=+848.901600590" watchObservedRunningTime="2025-11-25 21:44:53.488132228 +0000 UTC m=+848.950608550" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.538072 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-546d4bdf48-4hf76" podStartSLOduration=4.119443616 podStartE2EDuration="27.538039311s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.122463685 +0000 UTC m=+823.584940007" lastFinishedPulling="2025-11-25 21:44:51.54105937 +0000 UTC m=+847.003535702" observedRunningTime="2025-11-25 21:44:53.467012538 +0000 UTC m=+848.929488860" watchObservedRunningTime="2025-11-25 21:44:53.538039311 +0000 UTC m=+849.000515633" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.560736 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-76cc84c6bb-6zxdg" podStartSLOduration=6.602427261 podStartE2EDuration="27.560696542s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.260899599 +0000 UTC m=+823.723375921" lastFinishedPulling="2025-11-25 21:44:49.21916889 +0000 UTC m=+844.681645202" observedRunningTime="2025-11-25 21:44:53.494714062 +0000 UTC m=+848.957190384" watchObservedRunningTime="2025-11-25 21:44:53.560696542 +0000 UTC m=+849.023172874" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.609090 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-68c6d99b8f-pqkz6" podStartSLOduration=4.539727736 podStartE2EDuration="27.609063035s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:27.931113878 +0000 UTC m=+823.393590200" lastFinishedPulling="2025-11-25 21:44:51.000449167 +0000 UTC m=+846.462925499" observedRunningTime="2025-11-25 21:44:53.554779545 +0000 UTC m=+849.017255867" watchObservedRunningTime="2025-11-25 21:44:53.609063035 +0000 UTC m=+849.071539357" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.610431 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-64cdc6ff96-czf4h" podStartSLOduration=4.135988741 
podStartE2EDuration="27.610426442s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.111558632 +0000 UTC m=+823.574034954" lastFinishedPulling="2025-11-25 21:44:51.585996333 +0000 UTC m=+847.048472655" observedRunningTime="2025-11-25 21:44:53.599414379 +0000 UTC m=+849.061890701" watchObservedRunningTime="2025-11-25 21:44:53.610426442 +0000 UTC m=+849.072902764" Nov 25 21:44:53 crc kubenswrapper[4910]: I1125 21:44:53.634806 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" podStartSLOduration=6.59899083 podStartE2EDuration="27.634783578s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.311764528 +0000 UTC m=+823.774240850" lastFinishedPulling="2025-11-25 21:44:49.347557266 +0000 UTC m=+844.810033598" observedRunningTime="2025-11-25 21:44:53.629911289 +0000 UTC m=+849.092387621" watchObservedRunningTime="2025-11-25 21:44:53.634783578 +0000 UTC m=+849.097259900" Nov 25 21:44:54 crc kubenswrapper[4910]: I1125 21:44:54.220181 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" event={"ID":"0ac69701-2b03-4d80-bb8f-8f46acb193e4","Type":"ContainerStarted","Data":"18cb366d68a03eb56270473065229e9e40a84b4352395aaed8a56b4e7b5db936"} Nov 25 21:44:54 crc kubenswrapper[4910]: I1125 21:44:54.222049 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" event={"ID":"65280dcb-6ac6-443b-88f0-7d3b0dadb4f8","Type":"ContainerStarted","Data":"0f717c6f49b35f84ea876872cf4fa54f48166a1841f604a7d17abf283527e131"} Nov 25 21:44:54 crc kubenswrapper[4910]: I1125 21:44:54.225009 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" event={"ID":"bb7d559d-1779-400f-b556-1adbb0c61b60","Type":"ContainerStarted","Data":"76cf74d93f42435bfec14b92f49251aec0ba135729e0998d386eed2d6d025c48"} Nov 25 21:44:57 crc kubenswrapper[4910]: I1125 21:44:57.195465 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-78f8948974-wsd5q" Nov 25 21:44:58 crc kubenswrapper[4910]: I1125 21:44:58.256238 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:58 crc kubenswrapper[4910]: I1125 21:44:58.263056 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" Nov 25 21:44:58 crc kubenswrapper[4910]: I1125 21:44:58.282313 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd" podStartSLOduration=25.302737432 podStartE2EDuration="32.282293669s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:44.672294393 +0000 UTC m=+840.134770715" lastFinishedPulling="2025-11-25 21:44:51.65185063 +0000 UTC m=+847.114326952" observedRunningTime="2025-11-25 21:44:58.281862778 +0000 UTC m=+853.744339140" watchObservedRunningTime="2025-11-25 21:44:58.282293669 +0000 UTC m=+853.744769981" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.143495 4910 kubelet.go:2421] "SyncLoop 
ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj"] Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.145757 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.147915 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.148478 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.153679 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj"] Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.298237 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb61b0ae-981a-40d7-b94d-ecdec564363d-config-volume\") pod \"collect-profiles-29401785-5vzqj\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.298667 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb61b0ae-981a-40d7-b94d-ecdec564363d-secret-volume\") pod \"collect-profiles-29401785-5vzqj\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.299507 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5zgb\" (UniqueName: \"kubernetes.io/projected/cb61b0ae-981a-40d7-b94d-ecdec564363d-kube-api-access-h5zgb\") pod \"collect-profiles-29401785-5vzqj\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.401107 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb61b0ae-981a-40d7-b94d-ecdec564363d-secret-volume\") pod \"collect-profiles-29401785-5vzqj\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.401153 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5zgb\" (UniqueName: \"kubernetes.io/projected/cb61b0ae-981a-40d7-b94d-ecdec564363d-kube-api-access-h5zgb\") pod \"collect-profiles-29401785-5vzqj\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.401219 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb61b0ae-981a-40d7-b94d-ecdec564363d-config-volume\") pod \"collect-profiles-29401785-5vzqj\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:00 crc kubenswrapper[4910]: 
I1125 21:45:00.402321 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb61b0ae-981a-40d7-b94d-ecdec564363d-config-volume\") pod \"collect-profiles-29401785-5vzqj\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.406508 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb61b0ae-981a-40d7-b94d-ecdec564363d-secret-volume\") pod \"collect-profiles-29401785-5vzqj\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.417654 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5zgb\" (UniqueName: \"kubernetes.io/projected/cb61b0ae-981a-40d7-b94d-ecdec564363d-kube-api-access-h5zgb\") pod \"collect-profiles-29401785-5vzqj\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.513145 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:00 crc kubenswrapper[4910]: I1125 21:45:00.921175 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj"] Nov 25 21:45:00 crc kubenswrapper[4910]: W1125 21:45:00.922794 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcb61b0ae_981a_40d7_b94d_ecdec564363d.slice/crio-9a6bc0c44b7af4ec95d8cfa9477dfa52b7447d9daffa4e2e0f63a5915423f582 WatchSource:0}: Error finding container 9a6bc0c44b7af4ec95d8cfa9477dfa52b7447d9daffa4e2e0f63a5915423f582: Status 404 returned error can't find the container with id 9a6bc0c44b7af4ec95d8cfa9477dfa52b7447d9daffa4e2e0f63a5915423f582 Nov 25 21:45:01 crc kubenswrapper[4910]: I1125 21:45:01.300492 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" event={"ID":"44442e02-9c1f-4a6e-bcdd-237b8260638d","Type":"ContainerStarted","Data":"2899f63dfa16f5319d232a29b39e442fbe672278dbca752bddad55fa3e2f033d"} Nov 25 21:45:01 crc kubenswrapper[4910]: I1125 21:45:01.301716 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" event={"ID":"28574aa2-4470-4432-b7f0-4b3b52b5f8b9","Type":"ContainerStarted","Data":"18ac041710a42756989c6dfe54672e9a90aa03de5165be5176fb59de5358c53f"} Nov 25 21:45:01 crc kubenswrapper[4910]: I1125 21:45:01.304207 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" event={"ID":"ad233905-ebeb-4698-8261-d8a395be75d7","Type":"ContainerStarted","Data":"04680aa6f11c08118e66fa6bb05904c4236e3610bacb238d34667f980323d3c6"} Nov 25 21:45:01 crc kubenswrapper[4910]: I1125 21:45:01.306810 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" event={"ID":"2cce005e-33cd-4b63-8798-b0b7eb53ba73","Type":"ContainerStarted","Data":"3ec5e30f27f48e47de1a8686d2e64ea3916926b1ae77930053c1d744d42dce5b"} Nov 25 
21:45:01 crc kubenswrapper[4910]: I1125 21:45:01.309011 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" event={"ID":"5c6fa310-d85a-4ac3-be15-478635a8c221","Type":"ContainerStarted","Data":"d2c57d429faa8ef83716d03af111628c2611f31bb67f1ab45f499c7e76725838"} Nov 25 21:45:01 crc kubenswrapper[4910]: I1125 21:45:01.310416 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" event={"ID":"ba6c852e-59d0-4e5a-8967-3502457d62ec","Type":"ContainerStarted","Data":"ab57fb7c922c198d465dde8971808dd2086ce31ccbffeb63b66db0c826d9afe8"} Nov 25 21:45:01 crc kubenswrapper[4910]: I1125 21:45:01.313661 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" event={"ID":"cb61b0ae-981a-40d7-b94d-ecdec564363d","Type":"ContainerStarted","Data":"456e43156a749002a3a842f5e6dfbf69e50aad12da6f293a187bdbb957815b9d"} Nov 25 21:45:01 crc kubenswrapper[4910]: I1125 21:45:01.313688 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" event={"ID":"cb61b0ae-981a-40d7-b94d-ecdec564363d","Type":"ContainerStarted","Data":"9a6bc0c44b7af4ec95d8cfa9477dfa52b7447d9daffa4e2e0f63a5915423f582"} Nov 25 21:45:01 crc kubenswrapper[4910]: I1125 21:45:01.314076 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" Nov 25 21:45:01 crc kubenswrapper[4910]: I1125 21:45:01.315435 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" Nov 25 21:45:01 crc kubenswrapper[4910]: I1125 21:45:01.335148 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-74fcfc6d4b-2jdvk" podStartSLOduration=12.083692887 podStartE2EDuration="35.335130533s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.323048971 +0000 UTC m=+823.785525293" lastFinishedPulling="2025-11-25 21:44:51.574486617 +0000 UTC m=+847.036962939" observedRunningTime="2025-11-25 21:45:01.331129987 +0000 UTC m=+856.793606309" watchObservedRunningTime="2025-11-25 21:45:01.335130533 +0000 UTC m=+856.797606885" Nov 25 21:45:02 crc kubenswrapper[4910]: I1125 21:45:02.321751 4910 generic.go:334] "Generic (PLEG): container finished" podID="cb61b0ae-981a-40d7-b94d-ecdec564363d" containerID="456e43156a749002a3a842f5e6dfbf69e50aad12da6f293a187bdbb957815b9d" exitCode=0 Nov 25 21:45:02 crc kubenswrapper[4910]: I1125 21:45:02.321838 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" event={"ID":"cb61b0ae-981a-40d7-b94d-ecdec564363d","Type":"ContainerDied","Data":"456e43156a749002a3a842f5e6dfbf69e50aad12da6f293a187bdbb957815b9d"} Nov 25 21:45:02 crc kubenswrapper[4910]: I1125 21:45:02.365118 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-6c548fd776-q9hg4" podStartSLOduration=24.708828976 podStartE2EDuration="36.365090628s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:27.937634193 +0000 UTC m=+823.400110515" lastFinishedPulling="2025-11-25 21:44:39.593895845 +0000 UTC m=+835.056372167" 
observedRunningTime="2025-11-25 21:45:02.355445513 +0000 UTC m=+857.817921845" watchObservedRunningTime="2025-11-25 21:45:02.365090628 +0000 UTC m=+857.827566970" Nov 25 21:45:02 crc kubenswrapper[4910]: I1125 21:45:02.369681 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-57548d458d-9mx4m" Nov 25 21:45:02 crc kubenswrapper[4910]: I1125 21:45:02.381766 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" podStartSLOduration=13.002894436 podStartE2EDuration="36.38174908s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.275840021 +0000 UTC m=+823.738316343" lastFinishedPulling="2025-11-25 21:44:51.654694665 +0000 UTC m=+847.117170987" observedRunningTime="2025-11-25 21:45:02.378665649 +0000 UTC m=+857.841141971" watchObservedRunningTime="2025-11-25 21:45:02.38174908 +0000 UTC m=+857.844225402" Nov 25 21:45:02 crc kubenswrapper[4910]: I1125 21:45:02.399776 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" podStartSLOduration=13.043444922 podStartE2EDuration="36.399748888s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.302040266 +0000 UTC m=+823.764516588" lastFinishedPulling="2025-11-25 21:44:51.658344232 +0000 UTC m=+847.120820554" observedRunningTime="2025-11-25 21:45:02.396262046 +0000 UTC m=+857.858738388" watchObservedRunningTime="2025-11-25 21:45:02.399748888 +0000 UTC m=+857.862225210" Nov 25 21:45:02 crc kubenswrapper[4910]: I1125 21:45:02.418964 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5f64f6f8bb-vwgmq" podStartSLOduration=24.775065869 podStartE2EDuration="36.418938847s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:27.934262463 +0000 UTC m=+823.396738785" lastFinishedPulling="2025-11-25 21:44:39.578135441 +0000 UTC m=+835.040611763" observedRunningTime="2025-11-25 21:45:02.412617119 +0000 UTC m=+857.875093451" watchObservedRunningTime="2025-11-25 21:45:02.418938847 +0000 UTC m=+857.881415189" Nov 25 21:45:02 crc kubenswrapper[4910]: I1125 21:45:02.435831 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-56bbcc9d85-tkpxw" podStartSLOduration=25.069528054 podStartE2EDuration="36.435795974s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.114116781 +0000 UTC m=+823.576593103" lastFinishedPulling="2025-11-25 21:44:39.480384701 +0000 UTC m=+834.942861023" observedRunningTime="2025-11-25 21:45:02.430084443 +0000 UTC m=+857.892560775" watchObservedRunningTime="2025-11-25 21:45:02.435795974 +0000 UTC m=+857.898272306" Nov 25 21:45:02 crc kubenswrapper[4910]: I1125 21:45:02.459334 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" podStartSLOduration=12.86380327 podStartE2EDuration="36.459309488s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.125031844 +0000 UTC m=+823.587508166" lastFinishedPulling="2025-11-25 21:44:51.720538062 +0000 UTC m=+847.183014384" observedRunningTime="2025-11-25 21:45:02.451732797 +0000 UTC 
m=+857.914209139" watchObservedRunningTime="2025-11-25 21:45:02.459309488 +0000 UTC m=+857.921785810" Nov 25 21:45:02 crc kubenswrapper[4910]: I1125 21:45:02.480231 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-6546668bfd-z9dd7" podStartSLOduration=25.009428112 podStartE2EDuration="36.480207343s" podCreationTimestamp="2025-11-25 21:44:26 +0000 UTC" firstStartedPulling="2025-11-25 21:44:28.113923626 +0000 UTC m=+823.576399948" lastFinishedPulling="2025-11-25 21:44:39.584702847 +0000 UTC m=+835.047179179" observedRunningTime="2025-11-25 21:45:02.469675363 +0000 UTC m=+857.932151685" watchObservedRunningTime="2025-11-25 21:45:02.480207343 +0000 UTC m=+857.942683665" Nov 25 21:45:02 crc kubenswrapper[4910]: I1125 21:45:02.944088 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-758b84fd57-x2sxf" Nov 25 21:45:03 crc kubenswrapper[4910]: I1125 21:45:03.613904 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:03 crc kubenswrapper[4910]: I1125 21:45:03.761951 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb61b0ae-981a-40d7-b94d-ecdec564363d-secret-volume\") pod \"cb61b0ae-981a-40d7-b94d-ecdec564363d\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " Nov 25 21:45:03 crc kubenswrapper[4910]: I1125 21:45:03.762100 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb61b0ae-981a-40d7-b94d-ecdec564363d-config-volume\") pod \"cb61b0ae-981a-40d7-b94d-ecdec564363d\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " Nov 25 21:45:03 crc kubenswrapper[4910]: I1125 21:45:03.762128 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5zgb\" (UniqueName: \"kubernetes.io/projected/cb61b0ae-981a-40d7-b94d-ecdec564363d-kube-api-access-h5zgb\") pod \"cb61b0ae-981a-40d7-b94d-ecdec564363d\" (UID: \"cb61b0ae-981a-40d7-b94d-ecdec564363d\") " Nov 25 21:45:03 crc kubenswrapper[4910]: I1125 21:45:03.763692 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb61b0ae-981a-40d7-b94d-ecdec564363d-config-volume" (OuterVolumeSpecName: "config-volume") pod "cb61b0ae-981a-40d7-b94d-ecdec564363d" (UID: "cb61b0ae-981a-40d7-b94d-ecdec564363d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:45:03 crc kubenswrapper[4910]: I1125 21:45:03.769419 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb61b0ae-981a-40d7-b94d-ecdec564363d-kube-api-access-h5zgb" (OuterVolumeSpecName: "kube-api-access-h5zgb") pod "cb61b0ae-981a-40d7-b94d-ecdec564363d" (UID: "cb61b0ae-981a-40d7-b94d-ecdec564363d"). InnerVolumeSpecName "kube-api-access-h5zgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:45:03 crc kubenswrapper[4910]: I1125 21:45:03.773364 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb61b0ae-981a-40d7-b94d-ecdec564363d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cb61b0ae-981a-40d7-b94d-ecdec564363d" (UID: "cb61b0ae-981a-40d7-b94d-ecdec564363d"). InnerVolumeSpecName "secret-volume". 
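The repeated "Observed pod startup duration" entries above carry enough fields to reconstruct how the two durations relate: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that end-to-end time minus the image-pull window (lastFinishedPulling minus firstStartedPulling), i.e. startup with pull time excluded. A short Go check against the designate-operator entry logged at 21:44:53 reproduces both figures:

```go
package main

import (
	"fmt"
	"time"
)

// Layout matching the log's timestamp format ("2025-11-25 21:44:26 +0000 UTC").
const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	// Timestamps copied verbatim from the designate-operator entry.
	created := mustParse("2025-11-25 21:44:26 +0000 UTC")
	firstPull := mustParse("2025-11-25 21:44:27.937649774 +0000 UTC")
	lastPull := mustParse("2025-11-25 21:44:51.54180964 +0000 UTC")
	observed := mustParse("2025-11-25 21:44:53.483012832 +0000 UTC")

	e2e := observed.Sub(created)         // podStartE2EDuration: 27.483012832s
	slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration: 3.878852966s
	fmt.Println(e2e, slo)
}
```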
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:45:03 crc kubenswrapper[4910]: I1125 21:45:03.864273 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cb61b0ae-981a-40d7-b94d-ecdec564363d-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 21:45:03 crc kubenswrapper[4910]: I1125 21:45:03.864315 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5zgb\" (UniqueName: \"kubernetes.io/projected/cb61b0ae-981a-40d7-b94d-ecdec564363d-kube-api-access-h5zgb\") on node \"crc\" DevicePath \"\"" Nov 25 21:45:03 crc kubenswrapper[4910]: I1125 21:45:03.864328 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cb61b0ae-981a-40d7-b94d-ecdec564363d-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 21:45:04 crc kubenswrapper[4910]: I1125 21:45:04.338667 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" event={"ID":"cb61b0ae-981a-40d7-b94d-ecdec564363d","Type":"ContainerDied","Data":"9a6bc0c44b7af4ec95d8cfa9477dfa52b7447d9daffa4e2e0f63a5915423f582"} Nov 25 21:45:04 crc kubenswrapper[4910]: I1125 21:45:04.338748 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a6bc0c44b7af4ec95d8cfa9477dfa52b7447d9daffa4e2e0f63a5915423f582" Nov 25 21:45:04 crc kubenswrapper[4910]: I1125 21:45:04.338860 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj" Nov 25 21:45:06 crc kubenswrapper[4910]: I1125 21:45:06.997095 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" Nov 25 21:45:06 crc kubenswrapper[4910]: I1125 21:45:06.999622 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-555bbdd45-7f94x" Nov 25 21:45:07 crc kubenswrapper[4910]: I1125 21:45:07.217295 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" Nov 25 21:45:07 crc kubenswrapper[4910]: I1125 21:45:07.223933 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-5f8c65bbfc-wc77r" Nov 25 21:45:07 crc kubenswrapper[4910]: I1125 21:45:07.287487 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" Nov 25 21:45:07 crc kubenswrapper[4910]: I1125 21:45:07.291144 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-656dcb59d4-g9p8x" Nov 25 21:45:23 crc kubenswrapper[4910]: I1125 21:45:23.099278 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:45:23 crc kubenswrapper[4910]: I1125 21:45:23.100172 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.084054 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xsmgt"] Nov 25 21:45:25 crc kubenswrapper[4910]: E1125 21:45:25.084506 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb61b0ae-981a-40d7-b94d-ecdec564363d" containerName="collect-profiles" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.084524 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb61b0ae-981a-40d7-b94d-ecdec564363d" containerName="collect-profiles" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.084698 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb61b0ae-981a-40d7-b94d-ecdec564363d" containerName="collect-profiles" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.086024 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xsmgt" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.100082 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xsmgt"] Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.174451 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-utilities\") pod \"community-operators-xsmgt\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") " pod="openshift-marketplace/community-operators-xsmgt" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.174502 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljlv4\" (UniqueName: \"kubernetes.io/projected/5cfcad7b-815e-4079-bbf1-f1dd84641d31-kube-api-access-ljlv4\") pod \"community-operators-xsmgt\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") " pod="openshift-marketplace/community-operators-xsmgt" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.174822 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-catalog-content\") pod \"community-operators-xsmgt\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") " pod="openshift-marketplace/community-operators-xsmgt" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.276324 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-utilities\") pod \"community-operators-xsmgt\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") " pod="openshift-marketplace/community-operators-xsmgt" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.276384 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljlv4\" (UniqueName: \"kubernetes.io/projected/5cfcad7b-815e-4079-bbf1-f1dd84641d31-kube-api-access-ljlv4\") pod \"community-operators-xsmgt\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") " pod="openshift-marketplace/community-operators-xsmgt" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.276479 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-catalog-content\") pod \"community-operators-xsmgt\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") " pod="openshift-marketplace/community-operators-xsmgt" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.277071 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-catalog-content\") pod \"community-operators-xsmgt\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") " pod="openshift-marketplace/community-operators-xsmgt" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.277790 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-utilities\") pod \"community-operators-xsmgt\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") " pod="openshift-marketplace/community-operators-xsmgt" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.297395 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljlv4\" (UniqueName: \"kubernetes.io/projected/5cfcad7b-815e-4079-bbf1-f1dd84641d31-kube-api-access-ljlv4\") pod \"community-operators-xsmgt\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") " pod="openshift-marketplace/community-operators-xsmgt" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.469035 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xsmgt" Nov 25 21:45:25 crc kubenswrapper[4910]: I1125 21:45:25.794508 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xsmgt"] Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.507400 4910 generic.go:334] "Generic (PLEG): container finished" podID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerID="8c89f54d0acd62c989899fd5db43ca2c5e26b8d30bb0163e32f81d27cd08fb75" exitCode=0 Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.507576 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsmgt" event={"ID":"5cfcad7b-815e-4079-bbf1-f1dd84641d31","Type":"ContainerDied","Data":"8c89f54d0acd62c989899fd5db43ca2c5e26b8d30bb0163e32f81d27cd08fb75"} Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.507772 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsmgt" event={"ID":"5cfcad7b-815e-4079-bbf1-f1dd84641d31","Type":"ContainerStarted","Data":"c5d1e6980929e649f04f944db778b6c57a26da80eada61bc0f9cbcc8d5df5c66"} Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.510660 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.842400 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jdt8c"] Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.843877 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" Nov 25 21:45:26 crc kubenswrapper[4910]: W1125 21:45:26.845483 4910 reflector.go:561] object-"openstack"/"dnsmasq-dns-dockercfg-zxqm9": failed to list *v1.Secret: secrets "dnsmasq-dns-dockercfg-zxqm9" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 25 21:45:26 crc kubenswrapper[4910]: E1125 21:45:26.845544 4910 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"dnsmasq-dns-dockercfg-zxqm9\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"dnsmasq-dns-dockercfg-zxqm9\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.845714 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.846083 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 25 21:45:26 crc kubenswrapper[4910]: W1125 21:45:26.847491 4910 reflector.go:561] object-"openstack"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 25 21:45:26 crc kubenswrapper[4910]: E1125 21:45:26.847538 4910 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.865085 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jdt8c"] Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.888370 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sjqwg"] Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.890443 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.894768 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sjqwg"] Nov 25 21:45:26 crc kubenswrapper[4910]: I1125 21:45:26.902789 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.008886 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8cbx\" (UniqueName: \"kubernetes.io/projected/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-kube-api-access-m8cbx\") pod \"dnsmasq-dns-675f4bcbfc-jdt8c\" (UID: \"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.008971 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-config\") pod \"dnsmasq-dns-78dd6ddcc-sjqwg\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.009033 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqjqt\" (UniqueName: \"kubernetes.io/projected/449233b3-fd99-44f8-b93c-eedacc6817e1-kube-api-access-dqjqt\") pod \"dnsmasq-dns-78dd6ddcc-sjqwg\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.009057 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-sjqwg\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.009124 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-config\") pod \"dnsmasq-dns-675f4bcbfc-jdt8c\" (UID: \"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.110453 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-config\") pod \"dnsmasq-dns-675f4bcbfc-jdt8c\" (UID: \"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.110525 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8cbx\" (UniqueName: \"kubernetes.io/projected/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-kube-api-access-m8cbx\") pod \"dnsmasq-dns-675f4bcbfc-jdt8c\" (UID: \"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.110569 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-config\") pod \"dnsmasq-dns-78dd6ddcc-sjqwg\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 
25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.110623 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqjqt\" (UniqueName: \"kubernetes.io/projected/449233b3-fd99-44f8-b93c-eedacc6817e1-kube-api-access-dqjqt\") pod \"dnsmasq-dns-78dd6ddcc-sjqwg\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.110648 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-sjqwg\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.111773 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-sjqwg\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.111841 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-config\") pod \"dnsmasq-dns-78dd6ddcc-sjqwg\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.112023 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-config\") pod \"dnsmasq-dns-675f4bcbfc-jdt8c\" (UID: \"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.514983 4910 generic.go:334] "Generic (PLEG): container finished" podID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerID="b3ea2e46c3819543159cb51d5e43d4e9614727619648a2740393e1fce0c84021" exitCode=0 Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.515066 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsmgt" event={"ID":"5cfcad7b-815e-4079-bbf1-f1dd84641d31","Type":"ContainerDied","Data":"b3ea2e46c3819543159cb51d5e43d4e9614727619648a2740393e1fce0c84021"} Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.670750 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-dbmkp"] Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.673419 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.678941 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dbmkp"] Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.782969 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-zxqm9" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.820929 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tsfn\" (UniqueName: \"kubernetes.io/projected/be77737c-2252-4f34-8806-df94fea74276-kube-api-access-6tsfn\") pod \"redhat-operators-dbmkp\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.821225 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-catalog-content\") pod \"redhat-operators-dbmkp\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.821342 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-utilities\") pod \"redhat-operators-dbmkp\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.923294 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-catalog-content\") pod \"redhat-operators-dbmkp\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.924097 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-utilities\") pod \"redhat-operators-dbmkp\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.924685 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tsfn\" (UniqueName: \"kubernetes.io/projected/be77737c-2252-4f34-8806-df94fea74276-kube-api-access-6tsfn\") pod \"redhat-operators-dbmkp\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.924548 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-utilities\") pod \"redhat-operators-dbmkp\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.924024 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-catalog-content\") pod \"redhat-operators-dbmkp\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " 
pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.947650 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tsfn\" (UniqueName: \"kubernetes.io/projected/be77737c-2252-4f34-8806-df94fea74276-kube-api-access-6tsfn\") pod \"redhat-operators-dbmkp\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:27 crc kubenswrapper[4910]: I1125 21:45:27.987065 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:28 crc kubenswrapper[4910]: E1125 21:45:28.128679 4910 projected.go:288] Couldn't get configMap openstack/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 21:45:28 crc kubenswrapper[4910]: E1125 21:45:28.128763 4910 projected.go:194] Error preparing data for projected volume kube-api-access-m8cbx for pod openstack/dnsmasq-dns-675f4bcbfc-jdt8c: failed to sync configmap cache: timed out waiting for the condition Nov 25 21:45:28 crc kubenswrapper[4910]: E1125 21:45:28.128691 4910 projected.go:288] Couldn't get configMap openstack/kube-root-ca.crt: failed to sync configmap cache: timed out waiting for the condition Nov 25 21:45:28 crc kubenswrapper[4910]: E1125 21:45:28.128869 4910 projected.go:194] Error preparing data for projected volume kube-api-access-dqjqt for pod openstack/dnsmasq-dns-78dd6ddcc-sjqwg: failed to sync configmap cache: timed out waiting for the condition Nov 25 21:45:28 crc kubenswrapper[4910]: E1125 21:45:28.128831 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-kube-api-access-m8cbx podName:eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc nodeName:}" failed. No retries permitted until 2025-11-25 21:45:28.628811813 +0000 UTC m=+884.091288135 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-m8cbx" (UniqueName: "kubernetes.io/projected/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-kube-api-access-m8cbx") pod "dnsmasq-dns-675f4bcbfc-jdt8c" (UID: "eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc") : failed to sync configmap cache: timed out waiting for the condition Nov 25 21:45:28 crc kubenswrapper[4910]: E1125 21:45:28.128944 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/449233b3-fd99-44f8-b93c-eedacc6817e1-kube-api-access-dqjqt podName:449233b3-fd99-44f8-b93c-eedacc6817e1 nodeName:}" failed. No retries permitted until 2025-11-25 21:45:28.628920546 +0000 UTC m=+884.091396868 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-dqjqt" (UniqueName: "kubernetes.io/projected/449233b3-fd99-44f8-b93c-eedacc6817e1-kube-api-access-dqjqt") pod "dnsmasq-dns-78dd6ddcc-sjqwg" (UID: "449233b3-fd99-44f8-b93c-eedacc6817e1") : failed to sync configmap cache: timed out waiting for the condition Nov 25 21:45:28 crc kubenswrapper[4910]: I1125 21:45:28.166448 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 25 21:45:28 crc kubenswrapper[4910]: I1125 21:45:28.456722 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-dbmkp"] Nov 25 21:45:28 crc kubenswrapper[4910]: I1125 21:45:28.525926 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbmkp" event={"ID":"be77737c-2252-4f34-8806-df94fea74276","Type":"ContainerStarted","Data":"5bc512634e9168a9da5441badc60964d1cecf8f77ee9b5a42f80532641c3a2ed"} Nov 25 21:45:28 crc kubenswrapper[4910]: I1125 21:45:28.531045 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsmgt" event={"ID":"5cfcad7b-815e-4079-bbf1-f1dd84641d31","Type":"ContainerStarted","Data":"0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d"} Nov 25 21:45:28 crc kubenswrapper[4910]: I1125 21:45:28.558014 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xsmgt" podStartSLOduration=2.165958427 podStartE2EDuration="3.557992559s" podCreationTimestamp="2025-11-25 21:45:25 +0000 UTC" firstStartedPulling="2025-11-25 21:45:26.510256381 +0000 UTC m=+881.972732713" lastFinishedPulling="2025-11-25 21:45:27.902290513 +0000 UTC m=+883.364766845" observedRunningTime="2025-11-25 21:45:28.551814255 +0000 UTC m=+884.014290577" watchObservedRunningTime="2025-11-25 21:45:28.557992559 +0000 UTC m=+884.020468881" Nov 25 21:45:28 crc kubenswrapper[4910]: I1125 21:45:28.634774 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqjqt\" (UniqueName: \"kubernetes.io/projected/449233b3-fd99-44f8-b93c-eedacc6817e1-kube-api-access-dqjqt\") pod \"dnsmasq-dns-78dd6ddcc-sjqwg\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:45:28 crc kubenswrapper[4910]: I1125 21:45:28.634873 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8cbx\" (UniqueName: \"kubernetes.io/projected/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-kube-api-access-m8cbx\") pod \"dnsmasq-dns-675f4bcbfc-jdt8c\" (UID: \"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" Nov 25 21:45:28 crc kubenswrapper[4910]: I1125 21:45:28.641418 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqjqt\" (UniqueName: \"kubernetes.io/projected/449233b3-fd99-44f8-b93c-eedacc6817e1-kube-api-access-dqjqt\") pod \"dnsmasq-dns-78dd6ddcc-sjqwg\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:45:28 crc kubenswrapper[4910]: I1125 21:45:28.641749 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8cbx\" (UniqueName: \"kubernetes.io/projected/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-kube-api-access-m8cbx\") pod \"dnsmasq-dns-675f4bcbfc-jdt8c\" (UID: \"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc\") " pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" Nov 25 21:45:28 crc 
kubenswrapper[4910]: I1125 21:45:28.658973 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" Nov 25 21:45:28 crc kubenswrapper[4910]: I1125 21:45:28.710231 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:45:29 crc kubenswrapper[4910]: I1125 21:45:29.155918 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jdt8c"] Nov 25 21:45:29 crc kubenswrapper[4910]: I1125 21:45:29.265573 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sjqwg"] Nov 25 21:45:29 crc kubenswrapper[4910]: I1125 21:45:29.554664 4910 generic.go:334] "Generic (PLEG): container finished" podID="be77737c-2252-4f34-8806-df94fea74276" containerID="6144963b9d1f73fa292cb2bf0c5e0f9948372ae605216256288c8155c6a72069" exitCode=0 Nov 25 21:45:29 crc kubenswrapper[4910]: I1125 21:45:29.554708 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbmkp" event={"ID":"be77737c-2252-4f34-8806-df94fea74276","Type":"ContainerDied","Data":"6144963b9d1f73fa292cb2bf0c5e0f9948372ae605216256288c8155c6a72069"} Nov 25 21:45:29 crc kubenswrapper[4910]: I1125 21:45:29.559314 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" event={"ID":"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc","Type":"ContainerStarted","Data":"0f75995494fbecc0bae652a227d5b5817c8c25555bd1a9bc8780a765a179c0d9"} Nov 25 21:45:29 crc kubenswrapper[4910]: I1125 21:45:29.561877 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" event={"ID":"449233b3-fd99-44f8-b93c-eedacc6817e1","Type":"ContainerStarted","Data":"db05cbd0a59aa27f4b841f1d3a12ddd5c24fa8bb58fae70b91f7c172e51509ad"} Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.000141 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jdt8c"] Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.022013 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-pllqq"] Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.024158 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.064112 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-pllqq"] Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.161034 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-config\") pod \"dnsmasq-dns-666b6646f7-pllqq\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.161079 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6h72\" (UniqueName: \"kubernetes.io/projected/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-kube-api-access-k6h72\") pod \"dnsmasq-dns-666b6646f7-pllqq\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.161124 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-dns-svc\") pod \"dnsmasq-dns-666b6646f7-pllqq\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.262465 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-config\") pod \"dnsmasq-dns-666b6646f7-pllqq\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.262523 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6h72\" (UniqueName: \"kubernetes.io/projected/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-kube-api-access-k6h72\") pod \"dnsmasq-dns-666b6646f7-pllqq\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.262576 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-dns-svc\") pod \"dnsmasq-dns-666b6646f7-pllqq\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.263445 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-dns-svc\") pod \"dnsmasq-dns-666b6646f7-pllqq\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.263960 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-config\") pod \"dnsmasq-dns-666b6646f7-pllqq\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.313427 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6h72\" (UniqueName: 
\"kubernetes.io/projected/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-kube-api-access-k6h72\") pod \"dnsmasq-dns-666b6646f7-pllqq\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.350205 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sjqwg"] Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.380155 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sprkw"] Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.383474 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.397901 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.399909 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sprkw"] Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.467084 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xhmw\" (UniqueName: \"kubernetes.io/projected/221cba90-1fbe-4621-a05c-fccbe18d2b92-kube-api-access-8xhmw\") pod \"dnsmasq-dns-57d769cc4f-sprkw\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.467130 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-config\") pod \"dnsmasq-dns-57d769cc4f-sprkw\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.467152 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-sprkw\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.568929 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xhmw\" (UniqueName: \"kubernetes.io/projected/221cba90-1fbe-4621-a05c-fccbe18d2b92-kube-api-access-8xhmw\") pod \"dnsmasq-dns-57d769cc4f-sprkw\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.569298 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-config\") pod \"dnsmasq-dns-57d769cc4f-sprkw\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.569356 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-sprkw\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.570778 4910 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-config\") pod \"dnsmasq-dns-57d769cc4f-sprkw\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.570871 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-sprkw\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.590502 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xhmw\" (UniqueName: \"kubernetes.io/projected/221cba90-1fbe-4621-a05c-fccbe18d2b92-kube-api-access-8xhmw\") pod \"dnsmasq-dns-57d769cc4f-sprkw\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:45:30 crc kubenswrapper[4910]: I1125 21:45:30.709585 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.060553 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-pllqq"] Nov 25 21:45:31 crc kubenswrapper[4910]: W1125 21:45:31.066156 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3612f97_45d4_4d0c_81b1_19bb2e73f44f.slice/crio-f16b6aca8c34645801db13c22805f528a0c254839844b8b1f0724460744ab7e7 WatchSource:0}: Error finding container f16b6aca8c34645801db13c22805f528a0c254839844b8b1f0724460744ab7e7: Status 404 returned error can't find the container with id f16b6aca8c34645801db13c22805f528a0c254839844b8b1f0724460744ab7e7 Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.081988 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sprkw"] Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.198886 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.201177 4910 util.go:30] "No sandbox for pod can be found. 
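Every pod in this log walks the same three-step volume sequence, "VerifyControllerAttachedVolume started", then "MountVolume started", then "MountVolume.SetUp succeeded", and the rabbitmq-server-0 entries that follow queue those steps for a long list of volumes at once. The driver is the kubelet volume manager's reconcile loop, which diffs a desired state (volumes the scheduled pod specs require) against an actual state (what is attached and mounted) and issues operations for the difference. A toy Go sketch of that shape; the types and names here are illustrative, not kubelet's:

```go
package main

import "fmt"

// reconcile mounts whatever is desired but not yet in the actual state,
// echoing the three log messages seen for each volume above.
func reconcile(desired, actual map[string]bool) {
	for vol := range desired {
		if !actual[vol] {
			fmt.Println("VerifyControllerAttachedVolume started for", vol)
			fmt.Println("MountVolume started for", vol)
			// ... attach/mount work happens here ...
			actual[vol] = true
			fmt.Println("MountVolume.SetUp succeeded for", vol)
		}
	}
}

func main() {
	desired := map[string]bool{"config": true, "dns-svc": true, "kube-api-access": true}
	actual := map[string]bool{}
	reconcile(desired, actual)
}
```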
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.202915 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.207403 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.207701 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.207856 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-76g8c" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.207865 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.208036 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.208159 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.217587 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.279658 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.279739 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/58eca84e-dfac-4af7-ad45-241a776f81d6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.279768 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hlcp4\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-kube-api-access-hlcp4\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.279982 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.280037 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.280068 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-config-data\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.280155 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.280193 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.280398 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/58eca84e-dfac-4af7-ad45-241a776f81d6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.280435 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.280476 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.381941 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.381986 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/58eca84e-dfac-4af7-ad45-241a776f81d6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.382006 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hlcp4\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-kube-api-access-hlcp4\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.382063 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " 
pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.382082 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.382103 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-config-data\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.382131 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.382150 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.382191 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/58eca84e-dfac-4af7-ad45-241a776f81d6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.382218 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.382236 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.383094 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.383916 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-config-data\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.384229 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-erlang-cookie\") pod 
\"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.385070 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.388999 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.389823 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.396621 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/58eca84e-dfac-4af7-ad45-241a776f81d6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.398747 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.399178 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/58eca84e-dfac-4af7-ad45-241a776f81d6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.401995 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.409645 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hlcp4\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-kube-api-access-hlcp4\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.419133 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.508747 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 
21:45:31.510170 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.514381 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.514628 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-7hxk7" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.514786 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.516364 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.517368 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.517652 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.518565 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.541945 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.571385 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.592309 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.594609 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.594757 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.594868 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9b20e3e8-ac28-471d-82ed-e619a78a7c55-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.594976 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-tls\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.595062 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.595137 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.595271 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8w9b8\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-kube-api-access-8w9b8\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.595370 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.595447 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.595544 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9b20e3e8-ac28-471d-82ed-e619a78a7c55-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.621011 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" event={"ID":"221cba90-1fbe-4621-a05c-fccbe18d2b92","Type":"ContainerStarted","Data":"e5d3c3e6378dd9aef49dbb96ba17fbd2078bb3e8f2471c9935f047030e7e2e1a"} Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.624064 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-pllqq" event={"ID":"e3612f97-45d4-4d0c-81b1-19bb2e73f44f","Type":"ContainerStarted","Data":"f16b6aca8c34645801db13c22805f528a0c254839844b8b1f0724460744ab7e7"} Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.626999 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbmkp" event={"ID":"be77737c-2252-4f34-8806-df94fea74276","Type":"ContainerStarted","Data":"4d6e01fe9254bd3d1d998db153b4c1c4e93f68484a6aec99c053ab1fa28300ee"} Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.698701 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.699379 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.699540 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8w9b8\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-kube-api-access-8w9b8\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.699658 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.699746 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.699849 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9b20e3e8-ac28-471d-82ed-e619a78a7c55-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.700028 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.700120 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.700218 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.700338 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/9b20e3e8-ac28-471d-82ed-e619a78a7c55-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.700414 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.700505 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.701413 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.701466 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.705327 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.712991 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.714131 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9b20e3e8-ac28-471d-82ed-e619a78a7c55-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.716083 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.746140 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " 
pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.747287 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.755053 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8w9b8\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-kube-api-access-8w9b8\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.762453 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9b20e3e8-ac28-471d-82ed-e619a78a7c55-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.811032 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:31 crc kubenswrapper[4910]: I1125 21:45:31.891518 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.176791 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.464763 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 21:45:32 crc kubenswrapper[4910]: W1125 21:45:32.481792 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9b20e3e8_ac28_471d_82ed_e619a78a7c55.slice/crio-a305b01ce7967d3173f986621972e99d0c18daa8bbc25f6f49552e46b9200cab WatchSource:0}: Error finding container a305b01ce7967d3173f986621972e99d0c18daa8bbc25f6f49552e46b9200cab: Status 404 returned error can't find the container with id a305b01ce7967d3173f986621972e99d0c18daa8bbc25f6f49552e46b9200cab Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.658996 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9b20e3e8-ac28-471d-82ed-e619a78a7c55","Type":"ContainerStarted","Data":"a305b01ce7967d3173f986621972e99d0c18daa8bbc25f6f49552e46b9200cab"} Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.672565 4910 generic.go:334] "Generic (PLEG): container finished" podID="be77737c-2252-4f34-8806-df94fea74276" containerID="4d6e01fe9254bd3d1d998db153b4c1c4e93f68484a6aec99c053ab1fa28300ee" exitCode=0 Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.672672 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbmkp" event={"ID":"be77737c-2252-4f34-8806-df94fea74276","Type":"ContainerDied","Data":"4d6e01fe9254bd3d1d998db153b4c1c4e93f68484a6aec99c053ab1fa28300ee"} Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.680370 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/rabbitmq-server-0" event={"ID":"58eca84e-dfac-4af7-ad45-241a776f81d6","Type":"ContainerStarted","Data":"d4b506e7ae1191bfc6b0dedb055051fb3ed564e5d6e4379bd1616ddcc08fb9af"} Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.836839 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.838642 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.848942 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.849114 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.849147 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.849280 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.849405 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-r4g4p" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.853153 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.947642 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.947709 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-kolla-config\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.947743 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwlcm\" (UniqueName: \"kubernetes.io/projected/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-kube-api-access-hwlcm\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.947813 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.947838 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.947852 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-config-data-default\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.947874 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:32 crc kubenswrapper[4910]: I1125 21:45:32.947889 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.049630 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.049715 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.049829 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-config-data-default\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.049856 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.049876 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.049955 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.049977 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: 
\"kubernetes.io/configmap/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-kolla-config\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.049999 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwlcm\" (UniqueName: \"kubernetes.io/projected/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-kube-api-access-hwlcm\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.051030 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.052634 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-config-data-generated\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.055323 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-operator-scripts\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.055430 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-kolla-config\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.060476 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-config-data-default\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.066663 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.072592 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwlcm\" (UniqueName: \"kubernetes.io/projected/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-kube-api-access-hwlcm\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.073300 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc 
kubenswrapper[4910]: I1125 21:45:33.121355 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"openstack-galera-0\" (UID: \"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc\") " pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.177626 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Nov 25 21:45:33 crc kubenswrapper[4910]: I1125 21:45:33.840651 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 25 21:45:33 crc kubenswrapper[4910]: W1125 21:45:33.926907 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podecbb2a7f_c6f7_48f9_ab3a_b0cda9b445dc.slice/crio-0f5a0ccfd95c08efb0fa01537a1f99e483917b3064429d596a5a69a589a7dc36 WatchSource:0}: Error finding container 0f5a0ccfd95c08efb0fa01537a1f99e483917b3064429d596a5a69a589a7dc36: Status 404 returned error can't find the container with id 0f5a0ccfd95c08efb0fa01537a1f99e483917b3064429d596a5a69a589a7dc36 Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.304103 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.309641 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.309972 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.313553 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.313738 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.313861 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.314303 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-br9rt" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.418970 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.419928 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.425537 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.425707 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-pqh2v" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.426316 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.457792 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.481169 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.481215 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/62283554-0498-4bac-b223-8d3c6d21b614-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.481267 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/62283554-0498-4bac-b223-8d3c6d21b614-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.481293 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62283554-0498-4bac-b223-8d3c6d21b614-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.481307 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p68w6\" (UniqueName: \"kubernetes.io/projected/62283554-0498-4bac-b223-8d3c6d21b614-kube-api-access-p68w6\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.481331 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/62283554-0498-4bac-b223-8d3c6d21b614-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.481372 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62283554-0498-4bac-b223-8d3c6d21b614-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc 
kubenswrapper[4910]: I1125 21:45:34.481419 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/62283554-0498-4bac-b223-8d3c6d21b614-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.582327 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/62283554-0498-4bac-b223-8d3c6d21b614-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.582427 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c811f98e-8a72-406b-b0c3-35a7102dd46e-config-data\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.582460 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.582487 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/62283554-0498-4bac-b223-8d3c6d21b614-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.582521 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c811f98e-8a72-406b-b0c3-35a7102dd46e-kolla-config\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.582547 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/62283554-0498-4bac-b223-8d3c6d21b614-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.582571 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/62283554-0498-4bac-b223-8d3c6d21b614-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.582587 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p68w6\" (UniqueName: \"kubernetes.io/projected/62283554-0498-4bac-b223-8d3c6d21b614-kube-api-access-p68w6\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.582609 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/62283554-0498-4bac-b223-8d3c6d21b614-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.582625 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c811f98e-8a72-406b-b0c3-35a7102dd46e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.583391 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c811f98e-8a72-406b-b0c3-35a7102dd46e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.583469 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.583526 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxp7t\" (UniqueName: \"kubernetes.io/projected/c811f98e-8a72-406b-b0c3-35a7102dd46e-kube-api-access-fxp7t\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.583863 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/62283554-0498-4bac-b223-8d3c6d21b614-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.583939 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62283554-0498-4bac-b223-8d3c6d21b614-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.591890 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/62283554-0498-4bac-b223-8d3c6d21b614-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.593035 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/62283554-0498-4bac-b223-8d3c6d21b614-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.593070 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/62283554-0498-4bac-b223-8d3c6d21b614-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.618341 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p68w6\" (UniqueName: \"kubernetes.io/projected/62283554-0498-4bac-b223-8d3c6d21b614-kube-api-access-p68w6\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.620511 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/62283554-0498-4bac-b223-8d3c6d21b614-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.623415 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62283554-0498-4bac-b223-8d3c6d21b614-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.630511 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-cell1-galera-0\" (UID: \"62283554-0498-4bac-b223-8d3c6d21b614\") " pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.648584 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.695641 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c811f98e-8a72-406b-b0c3-35a7102dd46e-config-data\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.696505 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c811f98e-8a72-406b-b0c3-35a7102dd46e-kolla-config\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.696562 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c811f98e-8a72-406b-b0c3-35a7102dd46e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.696600 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c811f98e-8a72-406b-b0c3-35a7102dd46e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.696625 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxp7t\" (UniqueName: \"kubernetes.io/projected/c811f98e-8a72-406b-b0c3-35a7102dd46e-kube-api-access-fxp7t\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.696448 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c811f98e-8a72-406b-b0c3-35a7102dd46e-config-data\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.697990 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/c811f98e-8a72-406b-b0c3-35a7102dd46e-kolla-config\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.700854 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/c811f98e-8a72-406b-b0c3-35a7102dd46e-memcached-tls-certs\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.701371 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c811f98e-8a72-406b-b0c3-35a7102dd46e-combined-ca-bundle\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0" Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.719181 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc","Type":"ContainerStarted","Data":"0f5a0ccfd95c08efb0fa01537a1f99e483917b3064429d596a5a69a589a7dc36"} 
Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.743673 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxp7t\" (UniqueName: \"kubernetes.io/projected/c811f98e-8a72-406b-b0c3-35a7102dd46e-kube-api-access-fxp7t\") pod \"memcached-0\" (UID: \"c811f98e-8a72-406b-b0c3-35a7102dd46e\") " pod="openstack/memcached-0"
Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.754472 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbmkp" event={"ID":"be77737c-2252-4f34-8806-df94fea74276","Type":"ContainerStarted","Data":"5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa"}
Nov 25 21:45:34 crc kubenswrapper[4910]: I1125 21:45:34.781036 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-dbmkp" podStartSLOduration=3.842436507 podStartE2EDuration="7.781014779s" podCreationTimestamp="2025-11-25 21:45:27 +0000 UTC" firstStartedPulling="2025-11-25 21:45:29.557007053 +0000 UTC m=+885.019483375" lastFinishedPulling="2025-11-25 21:45:33.495585325 +0000 UTC m=+888.958061647" observedRunningTime="2025-11-25 21:45:34.777029703 +0000 UTC m=+890.239506025" watchObservedRunningTime="2025-11-25 21:45:34.781014779 +0000 UTC m=+890.243491101"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.042657 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.285205 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.469920 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xsmgt"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.470255 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xsmgt"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.486835 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-44bld"]
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.490081 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-44bld"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.514640 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-44bld"]
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.608881 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xsmgt"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.616578 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjbfm\" (UniqueName: \"kubernetes.io/projected/f43e4991-59cf-42b2-a415-733675d21bb1-kube-api-access-cjbfm\") pod \"redhat-marketplace-44bld\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " pod="openshift-marketplace/redhat-marketplace-44bld"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.616638 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-catalog-content\") pod \"redhat-marketplace-44bld\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " pod="openshift-marketplace/redhat-marketplace-44bld"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.616742 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-utilities\") pod \"redhat-marketplace-44bld\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " pod="openshift-marketplace/redhat-marketplace-44bld"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.719180 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-catalog-content\") pod \"redhat-marketplace-44bld\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " pod="openshift-marketplace/redhat-marketplace-44bld"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.719312 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-utilities\") pod \"redhat-marketplace-44bld\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " pod="openshift-marketplace/redhat-marketplace-44bld"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.719397 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjbfm\" (UniqueName: \"kubernetes.io/projected/f43e4991-59cf-42b2-a415-733675d21bb1-kube-api-access-cjbfm\") pod \"redhat-marketplace-44bld\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " pod="openshift-marketplace/redhat-marketplace-44bld"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.719997 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-catalog-content\") pod \"redhat-marketplace-44bld\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " pod="openshift-marketplace/redhat-marketplace-44bld"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.720269 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-utilities\") pod \"redhat-marketplace-44bld\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " pod="openshift-marketplace/redhat-marketplace-44bld"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.749852 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjbfm\" (UniqueName: \"kubernetes.io/projected/f43e4991-59cf-42b2-a415-733675d21bb1-kube-api-access-cjbfm\") pod \"redhat-marketplace-44bld\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " pod="openshift-marketplace/redhat-marketplace-44bld"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.776673 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"62283554-0498-4bac-b223-8d3c6d21b614","Type":"ContainerStarted","Data":"b29a63b6e2e69f483edec025a41e516c5dbde3df9ed8f9af17aacfc9360a81b5"}
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.787073 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 25 21:45:35 crc kubenswrapper[4910]: W1125 21:45:35.801707 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc811f98e_8a72_406b_b0c3_35a7102dd46e.slice/crio-4689fa99e04b0523a4c6a9c34d16a219ecee5e92b91260397abbe9509280df14 WatchSource:0}: Error finding container 4689fa99e04b0523a4c6a9c34d16a219ecee5e92b91260397abbe9509280df14: Status 404 returned error can't find the container with id 4689fa99e04b0523a4c6a9c34d16a219ecee5e92b91260397abbe9509280df14
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.812463 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-44bld"
Nov 25 21:45:35 crc kubenswrapper[4910]: I1125 21:45:35.920101 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xsmgt"
Nov 25 21:45:36 crc kubenswrapper[4910]: I1125 21:45:36.481495 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-44bld"]
Nov 25 21:45:36 crc kubenswrapper[4910]: I1125 21:45:36.492708 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 21:45:36 crc kubenswrapper[4910]: I1125 21:45:36.493901 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 21:45:36 crc kubenswrapper[4910]: I1125 21:45:36.506738 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 21:45:36 crc kubenswrapper[4910]: I1125 21:45:36.507475 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-s96p6"
Nov 25 21:45:36 crc kubenswrapper[4910]: W1125 21:45:36.581893 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf43e4991_59cf_42b2_a415_733675d21bb1.slice/crio-d11e574cacc3fdcd356e7b6b40cdbf8e670bb692501f11475ac830e160d52a35 WatchSource:0}: Error finding container d11e574cacc3fdcd356e7b6b40cdbf8e670bb692501f11475ac830e160d52a35: Status 404 returned error can't find the container with id d11e574cacc3fdcd356e7b6b40cdbf8e670bb692501f11475ac830e160d52a35
Nov 25 21:45:36 crc kubenswrapper[4910]: I1125 21:45:36.648100 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnrbm\" (UniqueName: \"kubernetes.io/projected/28a65abf-02ed-47dd-a7ce-0cc927aac523-kube-api-access-cnrbm\") pod \"kube-state-metrics-0\" (UID: \"28a65abf-02ed-47dd-a7ce-0cc927aac523\") " pod="openstack/kube-state-metrics-0"
Nov 25 21:45:36 crc kubenswrapper[4910]: I1125 21:45:36.754588 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnrbm\" (UniqueName: \"kubernetes.io/projected/28a65abf-02ed-47dd-a7ce-0cc927aac523-kube-api-access-cnrbm\") pod \"kube-state-metrics-0\" (UID: \"28a65abf-02ed-47dd-a7ce-0cc927aac523\") " pod="openstack/kube-state-metrics-0"
Nov 25 21:45:36 crc kubenswrapper[4910]: I1125 21:45:36.804211 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnrbm\" (UniqueName: \"kubernetes.io/projected/28a65abf-02ed-47dd-a7ce-0cc927aac523-kube-api-access-cnrbm\") pod \"kube-state-metrics-0\" (UID: \"28a65abf-02ed-47dd-a7ce-0cc927aac523\") " pod="openstack/kube-state-metrics-0"
Nov 25 21:45:36 crc kubenswrapper[4910]: I1125 21:45:36.860813 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Nov 25 21:45:36 crc kubenswrapper[4910]: I1125 21:45:36.868456 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"c811f98e-8a72-406b-b0c3-35a7102dd46e","Type":"ContainerStarted","Data":"4689fa99e04b0523a4c6a9c34d16a219ecee5e92b91260397abbe9509280df14"}
Nov 25 21:45:36 crc kubenswrapper[4910]: I1125 21:45:36.872331 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44bld" event={"ID":"f43e4991-59cf-42b2-a415-733675d21bb1","Type":"ContainerStarted","Data":"d11e574cacc3fdcd356e7b6b40cdbf8e670bb692501f11475ac830e160d52a35"}
Nov 25 21:45:37 crc kubenswrapper[4910]: E1125 21:45:37.113428 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf43e4991_59cf_42b2_a415_733675d21bb1.slice/crio-4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 21:45:37 crc kubenswrapper[4910]: I1125 21:45:37.420425 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 25 21:45:37 crc kubenswrapper[4910]: W1125 21:45:37.436795 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28a65abf_02ed_47dd_a7ce_0cc927aac523.slice/crio-92dc1098b82946f5cca0810ce5172bd72411cb7faeb1a0ca4364e2d0fc9c04a0 WatchSource:0}: Error finding container 92dc1098b82946f5cca0810ce5172bd72411cb7faeb1a0ca4364e2d0fc9c04a0: Status 404 returned error can't find the container with id 92dc1098b82946f5cca0810ce5172bd72411cb7faeb1a0ca4364e2d0fc9c04a0
Nov 25 21:45:37 crc kubenswrapper[4910]: I1125 21:45:37.886326 4910 generic.go:334] "Generic (PLEG): container finished" podID="f43e4991-59cf-42b2-a415-733675d21bb1" containerID="4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22" exitCode=0
Nov 25 21:45:37 crc kubenswrapper[4910]: I1125 21:45:37.886407 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44bld" event={"ID":"f43e4991-59cf-42b2-a415-733675d21bb1","Type":"ContainerDied","Data":"4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22"}
Nov 25 21:45:37 crc kubenswrapper[4910]: I1125 21:45:37.895359 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"28a65abf-02ed-47dd-a7ce-0cc927aac523","Type":"ContainerStarted","Data":"92dc1098b82946f5cca0810ce5172bd72411cb7faeb1a0ca4364e2d0fc9c04a0"}
Nov 25 21:45:37 crc kubenswrapper[4910]: I1125 21:45:37.987647 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-dbmkp"
Nov 25 21:45:37 crc kubenswrapper[4910]: I1125 21:45:37.987971 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-dbmkp"
Nov 25 21:45:38 crc kubenswrapper[4910]: I1125 21:45:38.057986 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xsmgt"]
Nov 25 21:45:38 crc kubenswrapper[4910]: I1125 21:45:38.915908 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xsmgt" podUID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerName="registry-server" containerID="cri-o://0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d" gracePeriod=2
Nov 25 21:45:39 crc kubenswrapper[4910]: I1125 21:45:39.104633 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-dbmkp" podUID="be77737c-2252-4f34-8806-df94fea74276" containerName="registry-server" probeResult="failure" output=<
Nov 25 21:45:39 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s
Nov 25 21:45:39 crc kubenswrapper[4910]: >
Nov 25 21:45:39 crc kubenswrapper[4910]: I1125 21:45:39.935708 4910 generic.go:334] "Generic (PLEG): container finished" podID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerID="0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d" exitCode=0
Nov 25 21:45:39 crc kubenswrapper[4910]: I1125 21:45:39.935765 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsmgt" event={"ID":"5cfcad7b-815e-4079-bbf1-f1dd84641d31","Type":"ContainerDied","Data":"0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d"}
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.140951 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-dbkwd"]
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.142739 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.145990 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-tbl9n"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.146108 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.160380 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-554dc"]
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.161844 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.171573 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.175410 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-dbkwd"]
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.183915 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-554dc"]
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.249583 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-var-log\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.249649 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-var-run-ovn\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.249748 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-var-log-ovn\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.249800 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-var-run\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.249822 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-var-run\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.249857 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-smk5f\" (UniqueName: \"kubernetes.io/projected/d271e423-f378-4368-b055-d89cea058d38-kube-api-access-smk5f\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.249895 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-var-lib\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.249917 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-combined-ca-bundle\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.249948 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvpkd\" (UniqueName: \"kubernetes.io/projected/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-kube-api-access-pvpkd\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.250009 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d271e423-f378-4368-b055-d89cea058d38-scripts\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.250032 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-ovn-controller-tls-certs\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.250061 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-scripts\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.250190 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-etc-ovs\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.351964 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-var-log-ovn\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352008 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-var-run\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352028 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-var-run\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352053 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-smk5f\" (UniqueName: \"kubernetes.io/projected/d271e423-f378-4368-b055-d89cea058d38-kube-api-access-smk5f\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352075 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-var-lib\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352107 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-combined-ca-bundle\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352124 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvpkd\" (UniqueName: \"kubernetes.io/projected/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-kube-api-access-pvpkd\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352155 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d271e423-f378-4368-b055-d89cea058d38-scripts\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352172 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-ovn-controller-tls-certs\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352189 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-scripts\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352275 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-etc-ovs\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352349 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-var-log\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352364 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-var-run-ovn\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352625 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-var-run\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352704 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-var-run\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.352929 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-var-log\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.353137 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-var-log-ovn\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.353585 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-etc-ovs\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.353838 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/d271e423-f378-4368-b055-d89cea058d38-var-lib\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.355341 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-var-run-ovn\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.355918 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d271e423-f378-4368-b055-d89cea058d38-scripts\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.359034 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-ovn-controller-tls-certs\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.362615 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-scripts\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.369563 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-combined-ca-bundle\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.370077 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-smk5f\" (UniqueName: \"kubernetes.io/projected/d271e423-f378-4368-b055-d89cea058d38-kube-api-access-smk5f\") pod \"ovn-controller-ovs-dbkwd\" (UID: \"d271e423-f378-4368-b055-d89cea058d38\") " pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.381131 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvpkd\" (UniqueName: \"kubernetes.io/projected/5d3afe23-a5d2-4f9c-bdaa-f80020ef6226-kube-api-access-pvpkd\") pod \"ovn-controller-554dc\" (UID: \"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226\") " pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.481706 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-dbkwd"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.504950 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-554dc"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.601634 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.603882 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.605793 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-bqcll"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.611934 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.611971 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.612014 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.612490 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.619277 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.758652 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.758709 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.758754 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9bkm\" (UniqueName: \"kubernetes.io/projected/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-kube-api-access-f9bkm\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.758770 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-config\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.758826 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.758853 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.758870 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.758885 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.864889 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.864958 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9bkm\" (UniqueName: \"kubernetes.io/projected/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-kube-api-access-f9bkm\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.864978 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-config\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.865037 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.865067 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.865091 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.865109 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.865133 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.866502 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.868818 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.869134 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.869168 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-config\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.871682 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.872213 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.886647 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.896988 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9bkm\" (UniqueName: \"kubernetes.io/projected/d7e886f1-04bd-4061-9a6c-18a20a1d7cbe-kube-api-access-f9bkm\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.897747 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe\") " pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:40 crc kubenswrapper[4910]: I1125 21:45:40.934030 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Nov 25 21:45:43 crc kubenswrapper[4910]: I1125 21:45:43.933238 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 25 21:45:43 crc kubenswrapper[4910]: I1125 21:45:43.935463 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:43 crc kubenswrapper[4910]: I1125 21:45:43.939579 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts"
Nov 25 21:45:43 crc kubenswrapper[4910]: I1125 21:45:43.939584 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-cvld2"
Nov 25 21:45:43 crc kubenswrapper[4910]: I1125 21:45:43.940000 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs"
Nov 25 21:45:43 crc kubenswrapper[4910]: I1125 21:45:43.946470 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config"
Nov 25 21:45:43 crc kubenswrapper[4910]: I1125 21:45:43.959698 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.123884 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4285011-1eac-4f3c-af27-c6c6ad03d8de-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.123933 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.123959 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d4285011-1eac-4f3c-af27-c6c6ad03d8de-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.123992 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4285011-1eac-4f3c-af27-c6c6ad03d8de-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.124011 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4285011-1eac-4f3c-af27-c6c6ad03d8de-config\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.124131 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44szd\" (UniqueName: \"kubernetes.io/projected/d4285011-1eac-4f3c-af27-c6c6ad03d8de-kube-api-access-44szd\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.124231 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d4285011-1eac-4f3c-af27-c6c6ad03d8de-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.124288 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4285011-1eac-4f3c-af27-c6c6ad03d8de-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.225481 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4285011-1eac-4f3c-af27-c6c6ad03d8de-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.225539 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.225572 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d4285011-1eac-4f3c-af27-c6c6ad03d8de-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.225601 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4285011-1eac-4f3c-af27-c6c6ad03d8de-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.225622 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4285011-1eac-4f3c-af27-c6c6ad03d8de-config\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.225645 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44szd\" (UniqueName: \"kubernetes.io/projected/d4285011-1eac-4f3c-af27-c6c6ad03d8de-kube-api-access-44szd\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.225671 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d4285011-1eac-4f3c-af27-c6c6ad03d8de-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.225694 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4285011-1eac-4f3c-af27-c6c6ad03d8de-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.225841 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.226494 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/d4285011-1eac-4f3c-af27-c6c6ad03d8de-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.226545 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d4285011-1eac-4f3c-af27-c6c6ad03d8de-config\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.226910 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d4285011-1eac-4f3c-af27-c6c6ad03d8de-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.229958 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4285011-1eac-4f3c-af27-c6c6ad03d8de-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.230546 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/d4285011-1eac-4f3c-af27-c6c6ad03d8de-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.231778 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d4285011-1eac-4f3c-af27-c6c6ad03d8de-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.241749 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44szd\" (UniqueName: \"kubernetes.io/projected/d4285011-1eac-4f3c-af27-c6c6ad03d8de-kube-api-access-44szd\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.249586 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"d4285011-1eac-4f3c-af27-c6c6ad03d8de\") " pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:44 crc kubenswrapper[4910]: I1125 21:45:44.259428 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Nov 25 21:45:45 crc kubenswrapper[4910]: E1125 21:45:45.469709 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d is running failed: container process not found" containerID="0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 21:45:45 crc kubenswrapper[4910]: E1125 21:45:45.470046 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d is running failed: container process not found" containerID="0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 21:45:45 crc kubenswrapper[4910]: E1125 21:45:45.470592 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d is running failed: container process not found" containerID="0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d" cmd=["grpc_health_probe","-addr=:50051"]
Nov 25 21:45:45 crc kubenswrapper[4910]: E1125 21:45:45.470675 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-xsmgt" podUID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerName="registry-server"
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.726306 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xsmgt"
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.890370 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljlv4\" (UniqueName: \"kubernetes.io/projected/5cfcad7b-815e-4079-bbf1-f1dd84641d31-kube-api-access-ljlv4\") pod \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") "
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.890431 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-utilities\") pod \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") "
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.890631 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-catalog-content\") pod \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\" (UID: \"5cfcad7b-815e-4079-bbf1-f1dd84641d31\") "
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.891177 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-utilities" (OuterVolumeSpecName: "utilities") pod "5cfcad7b-815e-4079-bbf1-f1dd84641d31" (UID: "5cfcad7b-815e-4079-bbf1-f1dd84641d31"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.895626 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5cfcad7b-815e-4079-bbf1-f1dd84641d31-kube-api-access-ljlv4" (OuterVolumeSpecName: "kube-api-access-ljlv4") pod "5cfcad7b-815e-4079-bbf1-f1dd84641d31" (UID: "5cfcad7b-815e-4079-bbf1-f1dd84641d31"). InnerVolumeSpecName "kube-api-access-ljlv4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.955695 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5cfcad7b-815e-4079-bbf1-f1dd84641d31" (UID: "5cfcad7b-815e-4079-bbf1-f1dd84641d31"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.992538 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.992571 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljlv4\" (UniqueName: \"kubernetes.io/projected/5cfcad7b-815e-4079-bbf1-f1dd84641d31-kube-api-access-ljlv4\") on node \"crc\" DevicePath \"\""
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.992588 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5cfcad7b-815e-4079-bbf1-f1dd84641d31-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.998495 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xsmgt" event={"ID":"5cfcad7b-815e-4079-bbf1-f1dd84641d31","Type":"ContainerDied","Data":"c5d1e6980929e649f04f944db778b6c57a26da80eada61bc0f9cbcc8d5df5c66"}
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.998826 4910 scope.go:117] "RemoveContainer" containerID="0ca41ac17b0ec0a6f124183eeedac44dffc9efd0a3ef35849b1251a1421f5a6d"
Nov 25 21:45:47 crc kubenswrapper[4910]: I1125 21:45:47.998617 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xsmgt"
Nov 25 21:45:48 crc kubenswrapper[4910]: I1125 21:45:48.035769 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xsmgt"]
Nov 25 21:45:48 crc kubenswrapper[4910]: I1125 21:45:48.042011 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xsmgt"]
Nov 25 21:45:48 crc kubenswrapper[4910]: I1125 21:45:48.043688 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-dbmkp"
Nov 25 21:45:48 crc kubenswrapper[4910]: I1125 21:45:48.087013 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-dbmkp"
Nov 25 21:45:49 crc kubenswrapper[4910]: I1125 21:45:49.221098 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" path="/var/lib/kubelet/pods/5cfcad7b-815e-4079-bbf1-f1dd84641d31/volumes"
Nov 25 21:45:49 crc kubenswrapper[4910]: I1125 21:45:49.961450 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dbmkp"]
Nov 25 21:45:50 crc kubenswrapper[4910]: I1125 21:45:50.013920 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-dbmkp" podUID="be77737c-2252-4f34-8806-df94fea74276" containerName="registry-server" containerID="cri-o://5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa" gracePeriod=2
Nov 25 21:45:50 crc kubenswrapper[4910]: E1125 21:45:50.281479 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified"
Nov 25 21:45:50 crc kubenswrapper[4910]: E1125 21:45:50.281689 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container
&Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8w9b8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(9b20e3e8-ac28-471d-82ed-e619a78a7c55): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:45:50 crc kubenswrapper[4910]: E1125 21:45:50.282874 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" Nov 25 21:45:51 crc kubenswrapper[4910]: I1125 21:45:51.034509 4910 generic.go:334] "Generic (PLEG): container finished" podID="be77737c-2252-4f34-8806-df94fea74276" containerID="5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa" exitCode=0 Nov 25 21:45:51 crc kubenswrapper[4910]: I1125 21:45:51.034594 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-dbmkp" event={"ID":"be77737c-2252-4f34-8806-df94fea74276","Type":"ContainerDied","Data":"5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa"} Nov 25 21:45:51 crc kubenswrapper[4910]: E1125 21:45:51.036692 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" Nov 25 21:45:52 crc kubenswrapper[4910]: E1125 21:45:52.448159 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Nov 25 21:45:52 crc kubenswrapper[4910]: E1125 21:45:52.448466 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hlcp4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(58eca84e-dfac-4af7-ad45-241a776f81d6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:45:52 crc kubenswrapper[4910]: E1125 21:45:52.449701 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="58eca84e-dfac-4af7-ad45-241a776f81d6" Nov 25 21:45:53 crc kubenswrapper[4910]: E1125 21:45:53.049836 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="58eca84e-dfac-4af7-ad45-241a776f81d6" Nov 25 21:45:53 crc kubenswrapper[4910]: I1125 21:45:53.099381 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:45:53 crc kubenswrapper[4910]: I1125 21:45:53.099918 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:45:53 crc kubenswrapper[4910]: E1125 21:45:53.216802 4910 log.go:32] "PullImage from image service 
failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Nov 25 21:45:53 crc kubenswrapper[4910]: E1125 21:45:53.217621 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n677h54bh56h5c5h549h594h5b7h5d6hbch655h5cdh65dh586hb7h567h5cfh9dh5d8h5dch568h56ch686h64fh5f6hb4h66bh545h64bh685h58dhdbh5f8q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fxp7t,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(c811f98e-8a72-406b-b0c3-35a7102dd46e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" 
logger="UnhandledError" Nov 25 21:45:53 crc kubenswrapper[4910]: E1125 21:45:53.219193 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="c811f98e-8a72-406b-b0c3-35a7102dd46e" Nov 25 21:45:53 crc kubenswrapper[4910]: I1125 21:45:53.806165 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-dbkwd"] Nov 25 21:45:54 crc kubenswrapper[4910]: E1125 21:45:54.056942 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="c811f98e-8a72-406b-b0c3-35a7102dd46e" Nov 25 21:45:57 crc kubenswrapper[4910]: I1125 21:45:57.827721 4910 scope.go:117] "RemoveContainer" containerID="b3ea2e46c3819543159cb51d5e43d4e9614727619648a2740393e1fce0c84021" Nov 25 21:45:57 crc kubenswrapper[4910]: W1125 21:45:57.941509 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd271e423_f378_4368_b055_d89cea058d38.slice/crio-945d05f5d87d01001ff4707436aef59bf1c0b67a54ec291d87d0b30b09748a8b WatchSource:0}: Error finding container 945d05f5d87d01001ff4707436aef59bf1c0b67a54ec291d87d0b30b09748a8b: Status 404 returned error can't find the container with id 945d05f5d87d01001ff4707436aef59bf1c0b67a54ec291d87d0b30b09748a8b Nov 25 21:45:57 crc kubenswrapper[4910]: E1125 21:45:57.989161 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa is running failed: container process not found" containerID="5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa" cmd=["grpc_health_probe","-addr=:50051"] Nov 25 21:45:57 crc kubenswrapper[4910]: E1125 21:45:57.990060 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa is running failed: container process not found" containerID="5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa" cmd=["grpc_health_probe","-addr=:50051"] Nov 25 21:45:57 crc kubenswrapper[4910]: E1125 21:45:57.991609 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa is running failed: container process not found" containerID="5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa" cmd=["grpc_health_probe","-addr=:50051"] Nov 25 21:45:57 crc kubenswrapper[4910]: E1125 21:45:57.991684 4910 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/redhat-operators-dbmkp" podUID="be77737c-2252-4f34-8806-df94fea74276" containerName="registry-server" Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.021795 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.098288 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-dbmkp" Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.098484 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-dbmkp" event={"ID":"be77737c-2252-4f34-8806-df94fea74276","Type":"ContainerDied","Data":"5bc512634e9168a9da5441badc60964d1cecf8f77ee9b5a42f80532641c3a2ed"} Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.100782 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dbkwd" event={"ID":"d271e423-f378-4368-b055-d89cea058d38","Type":"ContainerStarted","Data":"945d05f5d87d01001ff4707436aef59bf1c0b67a54ec291d87d0b30b09748a8b"} Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.199880 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-catalog-content\") pod \"be77737c-2252-4f34-8806-df94fea74276\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.199934 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-utilities\") pod \"be77737c-2252-4f34-8806-df94fea74276\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.200014 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tsfn\" (UniqueName: \"kubernetes.io/projected/be77737c-2252-4f34-8806-df94fea74276-kube-api-access-6tsfn\") pod \"be77737c-2252-4f34-8806-df94fea74276\" (UID: \"be77737c-2252-4f34-8806-df94fea74276\") " Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.201052 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-utilities" (OuterVolumeSpecName: "utilities") pod "be77737c-2252-4f34-8806-df94fea74276" (UID: "be77737c-2252-4f34-8806-df94fea74276"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.227153 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be77737c-2252-4f34-8806-df94fea74276-kube-api-access-6tsfn" (OuterVolumeSpecName: "kube-api-access-6tsfn") pod "be77737c-2252-4f34-8806-df94fea74276" (UID: "be77737c-2252-4f34-8806-df94fea74276"). InnerVolumeSpecName "kube-api-access-6tsfn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.302485 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tsfn\" (UniqueName: \"kubernetes.io/projected/be77737c-2252-4f34-8806-df94fea74276-kube-api-access-6tsfn\") on node \"crc\" DevicePath \"\"" Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.302525 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.306881 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "be77737c-2252-4f34-8806-df94fea74276" (UID: "be77737c-2252-4f34-8806-df94fea74276"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.405474 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/be77737c-2252-4f34-8806-df94fea74276-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.413181 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-554dc"] Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.446787 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-dbmkp"] Nov 25 21:45:58 crc kubenswrapper[4910]: I1125 21:45:58.454261 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-dbmkp"] Nov 25 21:45:58 crc kubenswrapper[4910]: E1125 21:45:58.788156 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 21:45:58 crc kubenswrapper[4910]: E1125 21:45:58.788366 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k6h72,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-pllqq_openstack(e3612f97-45d4-4d0c-81b1-19bb2e73f44f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:45:58 crc kubenswrapper[4910]: E1125 21:45:58.789265 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 21:45:58 crc kubenswrapper[4910]: E1125 21:45:58.789388 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-m8cbx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-jdt8c_openstack(eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:45:58 crc kubenswrapper[4910]: E1125 21:45:58.789517 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-pllqq" podUID="e3612f97-45d4-4d0c-81b1-19bb2e73f44f" Nov 25 21:45:58 crc kubenswrapper[4910]: E1125 21:45:58.791520 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" podUID="eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc" Nov 25 21:45:59 crc kubenswrapper[4910]: W1125 21:45:59.077674 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5d3afe23_a5d2_4f9c_bdaa_f80020ef6226.slice/crio-466797d3edd1340e191d82a76254ab2fab1096f9ea77180eb0bfd58289f443b7 WatchSource:0}: Error finding container 466797d3edd1340e191d82a76254ab2fab1096f9ea77180eb0bfd58289f443b7: Status 404 returned error can't find the container with id 466797d3edd1340e191d82a76254ab2fab1096f9ea77180eb0bfd58289f443b7 Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.079417 4910 scope.go:117] "RemoveContainer" containerID="8c89f54d0acd62c989899fd5db43ca2c5e26b8d30bb0163e32f81d27cd08fb75" Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.122739 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-554dc" event={"ID":"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226","Type":"ContainerStarted","Data":"466797d3edd1340e191d82a76254ab2fab1096f9ea77180eb0bfd58289f443b7"} Nov 25 21:45:59 crc kubenswrapper[4910]: E1125 21:45:59.127983 4910 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-pllqq" podUID="e3612f97-45d4-4d0c-81b1-19bb2e73f44f" Nov 25 21:45:59 crc kubenswrapper[4910]: E1125 21:45:59.150116 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 21:45:59 crc kubenswrapper[4910]: E1125 21:45:59.150394 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8xhmw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-sprkw_openstack(221cba90-1fbe-4621-a05c-fccbe18d2b92): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:45:59 crc kubenswrapper[4910]: E1125 21:45:59.151662 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" podUID="221cba90-1fbe-4621-a05c-fccbe18d2b92" Nov 25 21:45:59 crc kubenswrapper[4910]: E1125 21:45:59.174027 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Nov 25 21:45:59 crc kubenswrapper[4910]: E1125 21:45:59.174260 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dqjqt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-sjqwg_openstack(449233b3-fd99-44f8-b93c-eedacc6817e1): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:45:59 crc kubenswrapper[4910]: E1125 21:45:59.175558 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" podUID="449233b3-fd99-44f8-b93c-eedacc6817e1" Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.214716 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be77737c-2252-4f34-8806-df94fea74276" path="/var/lib/kubelet/pods/be77737c-2252-4f34-8806-df94fea74276/volumes" Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.218058 4910 scope.go:117] "RemoveContainer" containerID="5d7eee0ea23d494ed6eceff41add40ab0daf70c265813c571612c3111e38e4aa" Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.350300 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.437200 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/ovsdbserver-nb-0"] Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.459608 4910 scope.go:117] "RemoveContainer" containerID="4d6e01fe9254bd3d1d998db153b4c1c4e93f68484a6aec99c053ab1fa28300ee" Nov 25 21:45:59 crc kubenswrapper[4910]: W1125 21:45:59.467948 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd4285011_1eac_4f3c_af27_c6c6ad03d8de.slice/crio-d48756ae66498c1e9483e0fe203e997ac7fd34ab41816125606072cf5852ceb4 WatchSource:0}: Error finding container d48756ae66498c1e9483e0fe203e997ac7fd34ab41816125606072cf5852ceb4: Status 404 returned error can't find the container with id d48756ae66498c1e9483e0fe203e997ac7fd34ab41816125606072cf5852ceb4 Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.504990 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.632235 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8cbx\" (UniqueName: \"kubernetes.io/projected/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-kube-api-access-m8cbx\") pod \"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc\" (UID: \"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc\") " Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.632824 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-config\") pod \"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc\" (UID: \"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc\") " Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.633575 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-config" (OuterVolumeSpecName: "config") pod "eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc" (UID: "eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.637618 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-kube-api-access-m8cbx" (OuterVolumeSpecName: "kube-api-access-m8cbx") pod "eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc" (UID: "eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc"). InnerVolumeSpecName "kube-api-access-m8cbx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.734964 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.734998 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8cbx\" (UniqueName: \"kubernetes.io/projected/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc-kube-api-access-m8cbx\") on node \"crc\" DevicePath \"\"" Nov 25 21:45:59 crc kubenswrapper[4910]: I1125 21:45:59.775236 4910 scope.go:117] "RemoveContainer" containerID="6144963b9d1f73fa292cb2bf0c5e0f9948372ae605216256288c8155c6a72069" Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.143103 4910 generic.go:334] "Generic (PLEG): container finished" podID="f43e4991-59cf-42b2-a415-733675d21bb1" containerID="44f98276e600f758d677728483174e3f9a724e12f423211a5cfccf57bbc64cb7" exitCode=0 Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.143573 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44bld" event={"ID":"f43e4991-59cf-42b2-a415-733675d21bb1","Type":"ContainerDied","Data":"44f98276e600f758d677728483174e3f9a724e12f423211a5cfccf57bbc64cb7"} Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.148573 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe","Type":"ContainerStarted","Data":"d608f1199c026380fe5eb768c1b561687edd53eb56f2dea21e267dc666f0ee7c"} Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.165504 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.165508 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-jdt8c" event={"ID":"eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc","Type":"ContainerDied","Data":"0f75995494fbecc0bae652a227d5b5817c8c25555bd1a9bc8780a765a179c0d9"} Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.169935 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d4285011-1eac-4f3c-af27-c6c6ad03d8de","Type":"ContainerStarted","Data":"d48756ae66498c1e9483e0fe203e997ac7fd34ab41816125606072cf5852ceb4"} Nov 25 21:46:00 crc kubenswrapper[4910]: E1125 21:46:00.173849 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" podUID="221cba90-1fbe-4621-a05c-fccbe18d2b92" Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.247597 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jdt8c"] Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.252961 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-jdt8c"] Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.594736 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.651133 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dqjqt\" (UniqueName: \"kubernetes.io/projected/449233b3-fd99-44f8-b93c-eedacc6817e1-kube-api-access-dqjqt\") pod \"449233b3-fd99-44f8-b93c-eedacc6817e1\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.651207 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-config\") pod \"449233b3-fd99-44f8-b93c-eedacc6817e1\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.651290 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-dns-svc\") pod \"449233b3-fd99-44f8-b93c-eedacc6817e1\" (UID: \"449233b3-fd99-44f8-b93c-eedacc6817e1\") " Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.652165 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "449233b3-fd99-44f8-b93c-eedacc6817e1" (UID: "449233b3-fd99-44f8-b93c-eedacc6817e1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.653407 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-config" (OuterVolumeSpecName: "config") pod "449233b3-fd99-44f8-b93c-eedacc6817e1" (UID: "449233b3-fd99-44f8-b93c-eedacc6817e1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.657869 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/449233b3-fd99-44f8-b93c-eedacc6817e1-kube-api-access-dqjqt" (OuterVolumeSpecName: "kube-api-access-dqjqt") pod "449233b3-fd99-44f8-b93c-eedacc6817e1" (UID: "449233b3-fd99-44f8-b93c-eedacc6817e1"). InnerVolumeSpecName "kube-api-access-dqjqt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.752922 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.752956 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/449233b3-fd99-44f8-b93c-eedacc6817e1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:00 crc kubenswrapper[4910]: I1125 21:46:00.752966 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dqjqt\" (UniqueName: \"kubernetes.io/projected/449233b3-fd99-44f8-b93c-eedacc6817e1-kube-api-access-dqjqt\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:01 crc kubenswrapper[4910]: I1125 21:46:01.181282 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" event={"ID":"449233b3-fd99-44f8-b93c-eedacc6817e1","Type":"ContainerDied","Data":"db05cbd0a59aa27f4b841f1d3a12ddd5c24fa8bb58fae70b91f7c172e51509ad"} Nov 25 21:46:01 crc kubenswrapper[4910]: I1125 21:46:01.181370 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-sjqwg" Nov 25 21:46:01 crc kubenswrapper[4910]: I1125 21:46:01.217234 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc" path="/var/lib/kubelet/pods/eaa1fb96-5fcc-4bbb-bbc0-f99fce7130dc/volumes" Nov 25 21:46:01 crc kubenswrapper[4910]: I1125 21:46:01.251294 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sjqwg"] Nov 25 21:46:01 crc kubenswrapper[4910]: I1125 21:46:01.251359 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-sjqwg"] Nov 25 21:46:02 crc kubenswrapper[4910]: I1125 21:46:02.193498 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc","Type":"ContainerStarted","Data":"43ce548a7cf08fddce35077f3bc11b30b0ff1975671a19fb7157b2c08ae1dd5d"} Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.246935 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="449233b3-fd99-44f8-b93c-eedacc6817e1" path="/var/lib/kubelet/pods/449233b3-fd99-44f8-b93c-eedacc6817e1/volumes" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.252209 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"62283554-0498-4bac-b223-8d3c6d21b614","Type":"ContainerStarted","Data":"565ebec9fe2b7cb2d6be7d37978f5152e6287e7ba1d0a2eb22e5ce29a2a0ffc3"} Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.257627 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe","Type":"ContainerStarted","Data":"377c1efddcdb93a261d7bc7e73c0712e17c10f586df9a00d8e6e259d5bdd0ef7"} Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.260741 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d4285011-1eac-4f3c-af27-c6c6ad03d8de","Type":"ContainerStarted","Data":"20fda13cace97213a6f494cdd3a7634992c4dcdfcdfc1673e6d3eb5080768d89"} Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.263728 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-554dc" 
event={"ID":"5d3afe23-a5d2-4f9c-bdaa-f80020ef6226","Type":"ContainerStarted","Data":"7f5d27fa32fc5b2fd20f52e7f3d99376e3b98d320b9de7777c254f8dbc7d5eb3"} Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.264213 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-554dc" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.267885 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44bld" event={"ID":"f43e4991-59cf-42b2-a415-733675d21bb1","Type":"ContainerStarted","Data":"a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad"} Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.271320 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dbkwd" event={"ID":"d271e423-f378-4368-b055-d89cea058d38","Type":"ContainerStarted","Data":"b914217eb6875d82b28c33e830ea1e062c690627ca231966745a1482af0a89a7"} Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.284233 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"28a65abf-02ed-47dd-a7ce-0cc927aac523","Type":"ContainerStarted","Data":"61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132"} Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.284384 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.296685 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-44bld" podStartSLOduration=3.591073702 podStartE2EDuration="28.296662476s" podCreationTimestamp="2025-11-25 21:45:35 +0000 UTC" firstStartedPulling="2025-11-25 21:45:37.89030567 +0000 UTC m=+893.352781992" lastFinishedPulling="2025-11-25 21:46:02.595894444 +0000 UTC m=+918.058370766" observedRunningTime="2025-11-25 21:46:03.293933223 +0000 UTC m=+918.756409545" watchObservedRunningTime="2025-11-25 21:46:03.296662476 +0000 UTC m=+918.759138798" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.314733 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-554dc" podStartSLOduration=19.798226979 podStartE2EDuration="23.314710174s" podCreationTimestamp="2025-11-25 21:45:40 +0000 UTC" firstStartedPulling="2025-11-25 21:45:59.080312703 +0000 UTC m=+914.542789025" lastFinishedPulling="2025-11-25 21:46:02.596795898 +0000 UTC m=+918.059272220" observedRunningTime="2025-11-25 21:46:03.311306214 +0000 UTC m=+918.773782536" watchObservedRunningTime="2025-11-25 21:46:03.314710174 +0000 UTC m=+918.777186496" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.343853 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.204747393 podStartE2EDuration="27.343833327s" podCreationTimestamp="2025-11-25 21:45:36 +0000 UTC" firstStartedPulling="2025-11-25 21:45:37.441563495 +0000 UTC m=+892.904039817" lastFinishedPulling="2025-11-25 21:46:02.580649429 +0000 UTC m=+918.043125751" observedRunningTime="2025-11-25 21:46:03.342631785 +0000 UTC m=+918.805108107" watchObservedRunningTime="2025-11-25 21:46:03.343833327 +0000 UTC m=+918.806309649" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.682328 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-lz25t"] Nov 25 21:46:03 crc kubenswrapper[4910]: E1125 21:46:03.684173 4910 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerName="extract-utilities" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.684213 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerName="extract-utilities" Nov 25 21:46:03 crc kubenswrapper[4910]: E1125 21:46:03.684233 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be77737c-2252-4f34-8806-df94fea74276" containerName="registry-server" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.684259 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="be77737c-2252-4f34-8806-df94fea74276" containerName="registry-server" Nov 25 21:46:03 crc kubenswrapper[4910]: E1125 21:46:03.684274 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerName="extract-content" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.684284 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerName="extract-content" Nov 25 21:46:03 crc kubenswrapper[4910]: E1125 21:46:03.684296 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be77737c-2252-4f34-8806-df94fea74276" containerName="extract-content" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.684303 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="be77737c-2252-4f34-8806-df94fea74276" containerName="extract-content" Nov 25 21:46:03 crc kubenswrapper[4910]: E1125 21:46:03.684319 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be77737c-2252-4f34-8806-df94fea74276" containerName="extract-utilities" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.684326 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="be77737c-2252-4f34-8806-df94fea74276" containerName="extract-utilities" Nov 25 21:46:03 crc kubenswrapper[4910]: E1125 21:46:03.684338 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerName="registry-server" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.684346 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerName="registry-server" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.686167 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5cfcad7b-815e-4079-bbf1-f1dd84641d31" containerName="registry-server" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.686198 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="be77737c-2252-4f34-8806-df94fea74276" containerName="registry-server" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.686807 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.688959 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.691851 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-lz25t"] Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.713104 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d73b450a-c8fd-47c7-918c-273ae5d10b8a-ovn-rundir\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.713167 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d73b450a-c8fd-47c7-918c-273ae5d10b8a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.713208 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d73b450a-c8fd-47c7-918c-273ae5d10b8a-ovs-rundir\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.713326 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sbqdv\" (UniqueName: \"kubernetes.io/projected/d73b450a-c8fd-47c7-918c-273ae5d10b8a-kube-api-access-sbqdv\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.713343 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d73b450a-c8fd-47c7-918c-273ae5d10b8a-config\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.713372 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d73b450a-c8fd-47c7-918c-273ae5d10b8a-combined-ca-bundle\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.817399 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d73b450a-c8fd-47c7-918c-273ae5d10b8a-ovn-rundir\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.817616 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d73b450a-c8fd-47c7-918c-273ae5d10b8a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-lz25t\" 
(UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.817995 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/d73b450a-c8fd-47c7-918c-273ae5d10b8a-ovn-rundir\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.819336 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d73b450a-c8fd-47c7-918c-273ae5d10b8a-ovs-rundir\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.819452 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sbqdv\" (UniqueName: \"kubernetes.io/projected/d73b450a-c8fd-47c7-918c-273ae5d10b8a-kube-api-access-sbqdv\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.819475 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d73b450a-c8fd-47c7-918c-273ae5d10b8a-config\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.819542 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d73b450a-c8fd-47c7-918c-273ae5d10b8a-combined-ca-bundle\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.820838 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/d73b450a-c8fd-47c7-918c-273ae5d10b8a-ovs-rundir\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.825572 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d73b450a-c8fd-47c7-918c-273ae5d10b8a-config\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.829034 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d73b450a-c8fd-47c7-918c-273ae5d10b8a-combined-ca-bundle\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.829604 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/d73b450a-c8fd-47c7-918c-273ae5d10b8a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 
crc kubenswrapper[4910]: I1125 21:46:03.853165 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sbqdv\" (UniqueName: \"kubernetes.io/projected/d73b450a-c8fd-47c7-918c-273ae5d10b8a-kube-api-access-sbqdv\") pod \"ovn-controller-metrics-lz25t\" (UID: \"d73b450a-c8fd-47c7-918c-273ae5d10b8a\") " pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.884552 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-pllqq"] Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.934793 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-ncqdt"] Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.936631 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.939797 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 25 21:46:03 crc kubenswrapper[4910]: I1125 21:46:03.971033 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-ncqdt"] Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.023475 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.023645 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-config\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.023681 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9xcw\" (UniqueName: \"kubernetes.io/projected/55241105-f5fd-43b7-bb49-14bc09d3fb9c-kube-api-access-c9xcw\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.023709 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.040741 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-lz25t" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.057296 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sprkw"] Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.090333 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-fmm6b"] Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.091817 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.094610 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.127629 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.127687 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vk5sn\" (UniqueName: \"kubernetes.io/projected/07068c0b-39ba-423b-b6f6-5f68f568a6fe-kube-api-access-vk5sn\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.127745 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-config\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.127816 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-config\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.127839 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9xcw\" (UniqueName: \"kubernetes.io/projected/55241105-f5fd-43b7-bb49-14bc09d3fb9c-kube-api-access-c9xcw\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.127883 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.127904 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.127920 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-dns-svc\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.127999 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.130550 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-dns-svc\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.131217 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.135554 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-fmm6b"] Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.156800 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9xcw\" (UniqueName: \"kubernetes.io/projected/55241105-f5fd-43b7-bb49-14bc09d3fb9c-kube-api-access-c9xcw\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.200029 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-config\") pod \"dnsmasq-dns-6bc7876d45-ncqdt\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.229093 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.229158 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vk5sn\" (UniqueName: \"kubernetes.io/projected/07068c0b-39ba-423b-b6f6-5f68f568a6fe-kube-api-access-vk5sn\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.229189 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-config\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.229230 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.229263 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-dns-svc\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.230362 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-dns-svc\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.230900 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.231470 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.232337 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-config\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.257618 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vk5sn\" (UniqueName: \"kubernetes.io/projected/07068c0b-39ba-423b-b6f6-5f68f568a6fe-kube-api-access-vk5sn\") pod \"dnsmasq-dns-8554648995-fmm6b\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.266539 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.324329 4910 generic.go:334] "Generic (PLEG): container finished" podID="d271e423-f378-4368-b055-d89cea058d38" containerID="b914217eb6875d82b28c33e830ea1e062c690627ca231966745a1482af0a89a7" exitCode=0 Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.326618 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dbkwd" event={"ID":"d271e423-f378-4368-b055-d89cea058d38","Type":"ContainerDied","Data":"b914217eb6875d82b28c33e830ea1e062c690627ca231966745a1482af0a89a7"} Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.478167 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.519914 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.526599 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.532420 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-config\") pod \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.532547 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-dns-svc\") pod \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.532638 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6h72\" (UniqueName: \"kubernetes.io/projected/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-kube-api-access-k6h72\") pod \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\" (UID: \"e3612f97-45d4-4d0c-81b1-19bb2e73f44f\") " Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.533261 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-config" (OuterVolumeSpecName: "config") pod "e3612f97-45d4-4d0c-81b1-19bb2e73f44f" (UID: "e3612f97-45d4-4d0c-81b1-19bb2e73f44f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.536837 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e3612f97-45d4-4d0c-81b1-19bb2e73f44f" (UID: "e3612f97-45d4-4d0c-81b1-19bb2e73f44f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.542142 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-kube-api-access-k6h72" (OuterVolumeSpecName: "kube-api-access-k6h72") pod "e3612f97-45d4-4d0c-81b1-19bb2e73f44f" (UID: "e3612f97-45d4-4d0c-81b1-19bb2e73f44f"). InnerVolumeSpecName "kube-api-access-k6h72". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.634128 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-config\") pod \"221cba90-1fbe-4621-a05c-fccbe18d2b92\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.634300 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-dns-svc\") pod \"221cba90-1fbe-4621-a05c-fccbe18d2b92\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.634432 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xhmw\" (UniqueName: \"kubernetes.io/projected/221cba90-1fbe-4621-a05c-fccbe18d2b92-kube-api-access-8xhmw\") pod \"221cba90-1fbe-4621-a05c-fccbe18d2b92\" (UID: \"221cba90-1fbe-4621-a05c-fccbe18d2b92\") " Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.634858 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.634877 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6h72\" (UniqueName: \"kubernetes.io/projected/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-kube-api-access-k6h72\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.634892 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e3612f97-45d4-4d0c-81b1-19bb2e73f44f-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.634767 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-config" (OuterVolumeSpecName: "config") pod "221cba90-1fbe-4621-a05c-fccbe18d2b92" (UID: "221cba90-1fbe-4621-a05c-fccbe18d2b92"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.634989 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "221cba90-1fbe-4621-a05c-fccbe18d2b92" (UID: "221cba90-1fbe-4621-a05c-fccbe18d2b92"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.638333 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/221cba90-1fbe-4621-a05c-fccbe18d2b92-kube-api-access-8xhmw" (OuterVolumeSpecName: "kube-api-access-8xhmw") pod "221cba90-1fbe-4621-a05c-fccbe18d2b92" (UID: "221cba90-1fbe-4621-a05c-fccbe18d2b92"). InnerVolumeSpecName "kube-api-access-8xhmw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.719357 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-lz25t"] Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.737391 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.737427 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xhmw\" (UniqueName: \"kubernetes.io/projected/221cba90-1fbe-4621-a05c-fccbe18d2b92-kube-api-access-8xhmw\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.737439 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/221cba90-1fbe-4621-a05c-fccbe18d2b92-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:04 crc kubenswrapper[4910]: I1125 21:46:04.925948 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-ncqdt"] Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.048717 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-fmm6b"] Nov 25 21:46:05 crc kubenswrapper[4910]: W1125 21:46:05.065060 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod07068c0b_39ba_423b_b6f6_5f68f568a6fe.slice/crio-b2952e010784bd2d8ada6180307bd21bd62f02d7fe1868386502f53c5e8b1789 WatchSource:0}: Error finding container b2952e010784bd2d8ada6180307bd21bd62f02d7fe1868386502f53c5e8b1789: Status 404 returned error can't find the container with id b2952e010784bd2d8ada6180307bd21bd62f02d7fe1868386502f53c5e8b1789 Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.345823 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" event={"ID":"221cba90-1fbe-4621-a05c-fccbe18d2b92","Type":"ContainerDied","Data":"e5d3c3e6378dd9aef49dbb96ba17fbd2078bb3e8f2471c9935f047030e7e2e1a"} Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.346363 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-sprkw" Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.354143 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dbkwd" event={"ID":"d271e423-f378-4368-b055-d89cea058d38","Type":"ContainerStarted","Data":"5ee8792d31013aaaf0f17ef2db2ef772746bb1cd88ee862578dfb13919fe8368"} Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.354201 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-dbkwd" event={"ID":"d271e423-f378-4368-b055-d89cea058d38","Type":"ContainerStarted","Data":"6f66e784d0e52b3ffabdf5c5d4b33d46cbf5151c07bd009d65cf41a825010759"} Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.354272 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-dbkwd" Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.354304 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-dbkwd" Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.361540 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-pllqq" event={"ID":"e3612f97-45d4-4d0c-81b1-19bb2e73f44f","Type":"ContainerDied","Data":"f16b6aca8c34645801db13c22805f528a0c254839844b8b1f0724460744ab7e7"} Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.361586 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-pllqq" Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.366228 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-lz25t" event={"ID":"d73b450a-c8fd-47c7-918c-273ae5d10b8a","Type":"ContainerStarted","Data":"c5f786e4873777ca6b219a62654a8881323809546e3abde4706ee35367f618a7"} Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.369617 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-fmm6b" event={"ID":"07068c0b-39ba-423b-b6f6-5f68f568a6fe","Type":"ContainerStarted","Data":"b2952e010784bd2d8ada6180307bd21bd62f02d7fe1868386502f53c5e8b1789"} Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.375159 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9b20e3e8-ac28-471d-82ed-e619a78a7c55","Type":"ContainerStarted","Data":"a663883b01e5d94c3a7a51f2d4075ac478de107d5f88233f99ea3c93dfe1bda9"} Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.399474 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" event={"ID":"55241105-f5fd-43b7-bb49-14bc09d3fb9c","Type":"ContainerStarted","Data":"b57d50a0844491c03d237799518b148f34446c5037924558adf490ff31f5b056"} Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.432302 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-pllqq"] Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.439903 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-pllqq"] Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.459076 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-dbkwd" podStartSLOduration=20.813447582 podStartE2EDuration="25.459054413s" podCreationTimestamp="2025-11-25 21:45:40 +0000 UTC" firstStartedPulling="2025-11-25 21:45:57.946185724 +0000 UTC m=+913.408662066" lastFinishedPulling="2025-11-25 21:46:02.591792575 
+0000 UTC m=+918.054268897" observedRunningTime="2025-11-25 21:46:05.454814631 +0000 UTC m=+920.917290953" watchObservedRunningTime="2025-11-25 21:46:05.459054413 +0000 UTC m=+920.921530735" Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.505769 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sprkw"] Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.509280 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-sprkw"] Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.812623 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-44bld" Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.815980 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-44bld" Nov 25 21:46:05 crc kubenswrapper[4910]: I1125 21:46:05.874975 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-44bld" Nov 25 21:46:06 crc kubenswrapper[4910]: I1125 21:46:06.410788 4910 generic.go:334] "Generic (PLEG): container finished" podID="07068c0b-39ba-423b-b6f6-5f68f568a6fe" containerID="8e96aacdf07d0b27e96b90e76b88ed369713b57159939705eb1e0e2a335f581a" exitCode=0 Nov 25 21:46:06 crc kubenswrapper[4910]: I1125 21:46:06.410886 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-fmm6b" event={"ID":"07068c0b-39ba-423b-b6f6-5f68f568a6fe","Type":"ContainerDied","Data":"8e96aacdf07d0b27e96b90e76b88ed369713b57159939705eb1e0e2a335f581a"} Nov 25 21:46:06 crc kubenswrapper[4910]: I1125 21:46:06.415354 4910 generic.go:334] "Generic (PLEG): container finished" podID="55241105-f5fd-43b7-bb49-14bc09d3fb9c" containerID="c800ef8edad75c29c6f707365e9e1d376ee766c02d73c65763f88851f8571be6" exitCode=0 Nov 25 21:46:06 crc kubenswrapper[4910]: I1125 21:46:06.415415 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" event={"ID":"55241105-f5fd-43b7-bb49-14bc09d3fb9c","Type":"ContainerDied","Data":"c800ef8edad75c29c6f707365e9e1d376ee766c02d73c65763f88851f8571be6"} Nov 25 21:46:06 crc kubenswrapper[4910]: I1125 21:46:06.418295 4910 generic.go:334] "Generic (PLEG): container finished" podID="ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc" containerID="43ce548a7cf08fddce35077f3bc11b30b0ff1975671a19fb7157b2c08ae1dd5d" exitCode=0 Nov 25 21:46:06 crc kubenswrapper[4910]: I1125 21:46:06.418334 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc","Type":"ContainerDied","Data":"43ce548a7cf08fddce35077f3bc11b30b0ff1975671a19fb7157b2c08ae1dd5d"} Nov 25 21:46:06 crc kubenswrapper[4910]: I1125 21:46:06.421700 4910 generic.go:334] "Generic (PLEG): container finished" podID="62283554-0498-4bac-b223-8d3c6d21b614" containerID="565ebec9fe2b7cb2d6be7d37978f5152e6287e7ba1d0a2eb22e5ce29a2a0ffc3" exitCode=0 Nov 25 21:46:06 crc kubenswrapper[4910]: I1125 21:46:06.421789 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"62283554-0498-4bac-b223-8d3c6d21b614","Type":"ContainerDied","Data":"565ebec9fe2b7cb2d6be7d37978f5152e6287e7ba1d0a2eb22e5ce29a2a0ffc3"} Nov 25 21:46:07 crc kubenswrapper[4910]: I1125 21:46:07.218773 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="221cba90-1fbe-4621-a05c-fccbe18d2b92" 
path="/var/lib/kubelet/pods/221cba90-1fbe-4621-a05c-fccbe18d2b92/volumes" Nov 25 21:46:07 crc kubenswrapper[4910]: I1125 21:46:07.219205 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3612f97-45d4-4d0c-81b1-19bb2e73f44f" path="/var/lib/kubelet/pods/e3612f97-45d4-4d0c-81b1-19bb2e73f44f/volumes" Nov 25 21:46:07 crc kubenswrapper[4910]: I1125 21:46:07.507659 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-44bld" Nov 25 21:46:07 crc kubenswrapper[4910]: I1125 21:46:07.582024 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-44bld"] Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.452099 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc","Type":"ContainerStarted","Data":"ba7c809a473ea2557687cdf09f89473620e3a2b4e347727733dab870dc559294"} Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.453987 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"62283554-0498-4bac-b223-8d3c6d21b614","Type":"ContainerStarted","Data":"515b7569be7fdd94ff9a19f5c7753efb757f06b8217777d9d464faeb19412d21"} Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.456981 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-fmm6b" event={"ID":"07068c0b-39ba-423b-b6f6-5f68f568a6fe","Type":"ContainerStarted","Data":"e232ffbaf837378206e41067e3de37b088fac4e91500257e7ba0ca8131633841"} Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.457119 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.458733 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" event={"ID":"55241105-f5fd-43b7-bb49-14bc09d3fb9c","Type":"ContainerStarted","Data":"84da03a65976b1fd4476db7de84b1dbb6e526deb10b8bdbeefbb8e949fb27007"} Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.460107 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"d4285011-1eac-4f3c-af27-c6c6ad03d8de","Type":"ContainerStarted","Data":"5459ff465d1c0645e5e538b6bf9710cda1ba4c452f2f1bd751db25d1e9b46e61"} Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.461263 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"c811f98e-8a72-406b-b0c3-35a7102dd46e","Type":"ContainerStarted","Data":"8aad9f96c0cd3dc00541a4b6b18b0a5f67e4f6c37a6b03d5741345685782a713"} Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.461415 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.462284 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-lz25t" event={"ID":"d73b450a-c8fd-47c7-918c-273ae5d10b8a","Type":"ContainerStarted","Data":"5bf1a1cb8f3cf8ae659bfbf66bd04644e5f626bd9575680c7d34739efeb759a3"} Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.463669 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"d7e886f1-04bd-4061-9a6c-18a20a1d7cbe","Type":"ContainerStarted","Data":"daf9fb953282398431fdfecb4112a36900bdf274fe2ee0c91c7403b7ed59fb75"} Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.463867 
4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-44bld" podUID="f43e4991-59cf-42b2-a415-733675d21bb1" containerName="registry-server" containerID="cri-o://a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad" gracePeriod=2 Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.479697 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=13.34597189 podStartE2EDuration="38.479678352s" podCreationTimestamp="2025-11-25 21:45:31 +0000 UTC" firstStartedPulling="2025-11-25 21:45:33.946426906 +0000 UTC m=+889.408903228" lastFinishedPulling="2025-11-25 21:45:59.080133358 +0000 UTC m=+914.542609690" observedRunningTime="2025-11-25 21:46:09.477371561 +0000 UTC m=+924.939847903" watchObservedRunningTime="2025-11-25 21:46:09.479678352 +0000 UTC m=+924.942154674" Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.499059 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" podStartSLOduration=6.041739025 podStartE2EDuration="6.499038356s" podCreationTimestamp="2025-11-25 21:46:03 +0000 UTC" firstStartedPulling="2025-11-25 21:46:04.941149904 +0000 UTC m=+920.403626226" lastFinishedPulling="2025-11-25 21:46:05.398449235 +0000 UTC m=+920.860925557" observedRunningTime="2025-11-25 21:46:09.495831351 +0000 UTC m=+924.958307693" watchObservedRunningTime="2025-11-25 21:46:09.499038356 +0000 UTC m=+924.961514678" Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.524850 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=21.15404374 podStartE2EDuration="30.52482272s" podCreationTimestamp="2025-11-25 21:45:39 +0000 UTC" firstStartedPulling="2025-11-25 21:45:59.364984826 +0000 UTC m=+914.827461158" lastFinishedPulling="2025-11-25 21:46:08.735763806 +0000 UTC m=+924.198240138" observedRunningTime="2025-11-25 21:46:09.515813501 +0000 UTC m=+924.978289853" watchObservedRunningTime="2025-11-25 21:46:09.52482272 +0000 UTC m=+924.987299062" Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.539964 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=18.277012692 podStartE2EDuration="27.539938301s" podCreationTimestamp="2025-11-25 21:45:42 +0000 UTC" firstStartedPulling="2025-11-25 21:45:59.472656872 +0000 UTC m=+914.935133194" lastFinishedPulling="2025-11-25 21:46:08.735582491 +0000 UTC m=+924.198058803" observedRunningTime="2025-11-25 21:46:09.538983376 +0000 UTC m=+925.001459738" watchObservedRunningTime="2025-11-25 21:46:09.539938301 +0000 UTC m=+925.002414643" Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.567561 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-lz25t" podStartSLOduration=2.5916532820000002 podStartE2EDuration="6.567539073s" podCreationTimestamp="2025-11-25 21:46:03 +0000 UTC" firstStartedPulling="2025-11-25 21:46:04.771344509 +0000 UTC m=+920.233820831" lastFinishedPulling="2025-11-25 21:46:08.7472303 +0000 UTC m=+924.209706622" observedRunningTime="2025-11-25 21:46:09.563125846 +0000 UTC m=+925.025602178" watchObservedRunningTime="2025-11-25 21:46:09.567539073 +0000 UTC m=+925.030015395" Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.604032 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/dnsmasq-dns-8554648995-fmm6b" podStartSLOduration=5.063547624 podStartE2EDuration="5.604015971s" podCreationTimestamp="2025-11-25 21:46:04 +0000 UTC" firstStartedPulling="2025-11-25 21:46:05.067137437 +0000 UTC m=+920.529613759" lastFinishedPulling="2025-11-25 21:46:05.607605784 +0000 UTC m=+921.070082106" observedRunningTime="2025-11-25 21:46:09.599935503 +0000 UTC m=+925.062411825" watchObservedRunningTime="2025-11-25 21:46:09.604015971 +0000 UTC m=+925.066492293" Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.622601 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=5.675062671 podStartE2EDuration="35.622580214s" podCreationTimestamp="2025-11-25 21:45:34 +0000 UTC" firstStartedPulling="2025-11-25 21:45:35.844860043 +0000 UTC m=+891.307336365" lastFinishedPulling="2025-11-25 21:46:05.792377586 +0000 UTC m=+921.254853908" observedRunningTime="2025-11-25 21:46:09.620657603 +0000 UTC m=+925.083133935" watchObservedRunningTime="2025-11-25 21:46:09.622580214 +0000 UTC m=+925.085056536" Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.654639 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=13.235458751 podStartE2EDuration="36.654623024s" podCreationTimestamp="2025-11-25 21:45:33 +0000 UTC" firstStartedPulling="2025-11-25 21:45:35.317813391 +0000 UTC m=+890.780289713" lastFinishedPulling="2025-11-25 21:45:58.736977664 +0000 UTC m=+914.199453986" observedRunningTime="2025-11-25 21:46:09.650099264 +0000 UTC m=+925.112575586" watchObservedRunningTime="2025-11-25 21:46:09.654623024 +0000 UTC m=+925.117099346" Nov 25 21:46:09 crc kubenswrapper[4910]: I1125 21:46:09.957678 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-44bld" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.053856 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-catalog-content\") pod \"f43e4991-59cf-42b2-a415-733675d21bb1\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.053980 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-utilities\") pod \"f43e4991-59cf-42b2-a415-733675d21bb1\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.054046 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjbfm\" (UniqueName: \"kubernetes.io/projected/f43e4991-59cf-42b2-a415-733675d21bb1-kube-api-access-cjbfm\") pod \"f43e4991-59cf-42b2-a415-733675d21bb1\" (UID: \"f43e4991-59cf-42b2-a415-733675d21bb1\") " Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.055336 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-utilities" (OuterVolumeSpecName: "utilities") pod "f43e4991-59cf-42b2-a415-733675d21bb1" (UID: "f43e4991-59cf-42b2-a415-733675d21bb1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.062890 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f43e4991-59cf-42b2-a415-733675d21bb1-kube-api-access-cjbfm" (OuterVolumeSpecName: "kube-api-access-cjbfm") pod "f43e4991-59cf-42b2-a415-733675d21bb1" (UID: "f43e4991-59cf-42b2-a415-733675d21bb1"). InnerVolumeSpecName "kube-api-access-cjbfm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.156972 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.157027 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjbfm\" (UniqueName: \"kubernetes.io/projected/f43e4991-59cf-42b2-a415-733675d21bb1-kube-api-access-cjbfm\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.171427 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f43e4991-59cf-42b2-a415-733675d21bb1" (UID: "f43e4991-59cf-42b2-a415-733675d21bb1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.258354 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f43e4991-59cf-42b2-a415-733675d21bb1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.475546 4910 generic.go:334] "Generic (PLEG): container finished" podID="f43e4991-59cf-42b2-a415-733675d21bb1" containerID="a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad" exitCode=0 Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.475683 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44bld" event={"ID":"f43e4991-59cf-42b2-a415-733675d21bb1","Type":"ContainerDied","Data":"a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad"} Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.475710 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-44bld" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.475720 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-44bld" event={"ID":"f43e4991-59cf-42b2-a415-733675d21bb1","Type":"ContainerDied","Data":"d11e574cacc3fdcd356e7b6b40cdbf8e670bb692501f11475ac830e160d52a35"} Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.475735 4910 scope.go:117] "RemoveContainer" containerID="a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.478433 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58eca84e-dfac-4af7-ad45-241a776f81d6","Type":"ContainerStarted","Data":"3a5caa60ba704cb56a0ba9be7c1f393181c0267978e6e6d9500155cc7e70b7c2"} Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.479497 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.514100 4910 scope.go:117] "RemoveContainer" containerID="44f98276e600f758d677728483174e3f9a724e12f423211a5cfccf57bbc64cb7" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.566926 4910 scope.go:117] "RemoveContainer" containerID="4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.570806 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-44bld"] Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.582321 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-44bld"] Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.610357 4910 scope.go:117] "RemoveContainer" containerID="a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad" Nov 25 21:46:10 crc kubenswrapper[4910]: E1125 21:46:10.610762 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad\": container with ID starting with a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad not found: ID does not exist" containerID="a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.610798 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad"} err="failed to get container status \"a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad\": rpc error: code = NotFound desc = could not find container \"a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad\": container with ID starting with a14b863c73497d00630c53dbcc05cd34895fe1a4fcbca81116121c8adbec5aad not found: ID does not exist" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.610821 4910 scope.go:117] "RemoveContainer" containerID="44f98276e600f758d677728483174e3f9a724e12f423211a5cfccf57bbc64cb7" Nov 25 21:46:10 crc kubenswrapper[4910]: E1125 21:46:10.611440 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44f98276e600f758d677728483174e3f9a724e12f423211a5cfccf57bbc64cb7\": container with ID starting with 44f98276e600f758d677728483174e3f9a724e12f423211a5cfccf57bbc64cb7 not found: ID does not 
exist" containerID="44f98276e600f758d677728483174e3f9a724e12f423211a5cfccf57bbc64cb7" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.611548 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44f98276e600f758d677728483174e3f9a724e12f423211a5cfccf57bbc64cb7"} err="failed to get container status \"44f98276e600f758d677728483174e3f9a724e12f423211a5cfccf57bbc64cb7\": rpc error: code = NotFound desc = could not find container \"44f98276e600f758d677728483174e3f9a724e12f423211a5cfccf57bbc64cb7\": container with ID starting with 44f98276e600f758d677728483174e3f9a724e12f423211a5cfccf57bbc64cb7 not found: ID does not exist" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.611624 4910 scope.go:117] "RemoveContainer" containerID="4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22" Nov 25 21:46:10 crc kubenswrapper[4910]: E1125 21:46:10.612500 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22\": container with ID starting with 4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22 not found: ID does not exist" containerID="4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.612543 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22"} err="failed to get container status \"4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22\": rpc error: code = NotFound desc = could not find container \"4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22\": container with ID starting with 4177e0e170b51cd6f0ab188805f0ef4e2d5d94b1e460157e0215a698063ffc22 not found: ID does not exist" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.934781 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.935333 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 25 21:46:10 crc kubenswrapper[4910]: I1125 21:46:10.982972 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.220822 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f43e4991-59cf-42b2-a415-733675d21bb1" path="/var/lib/kubelet/pods/f43e4991-59cf-42b2-a415-733675d21bb1/volumes" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.260710 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.308019 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.490138 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.542879 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.549706 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 25 21:46:11 crc 
kubenswrapper[4910]: I1125 21:46:11.932372 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 25 21:46:11 crc kubenswrapper[4910]: E1125 21:46:11.932999 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f43e4991-59cf-42b2-a415-733675d21bb1" containerName="registry-server" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.933029 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f43e4991-59cf-42b2-a415-733675d21bb1" containerName="registry-server" Nov 25 21:46:11 crc kubenswrapper[4910]: E1125 21:46:11.933081 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f43e4991-59cf-42b2-a415-733675d21bb1" containerName="extract-utilities" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.933089 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f43e4991-59cf-42b2-a415-733675d21bb1" containerName="extract-utilities" Nov 25 21:46:11 crc kubenswrapper[4910]: E1125 21:46:11.933105 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f43e4991-59cf-42b2-a415-733675d21bb1" containerName="extract-content" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.933115 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f43e4991-59cf-42b2-a415-733675d21bb1" containerName="extract-content" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.933368 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f43e4991-59cf-42b2-a415-733675d21bb1" containerName="registry-server" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.934815 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.936876 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.940824 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.941530 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.941553 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-77hc4" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.944312 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.998217 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-scripts\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.998664 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-config\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.998839 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: 
\"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.999071 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.999237 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.999346 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-755sr\" (UniqueName: \"kubernetes.io/projected/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-kube-api-access-755sr\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:11 crc kubenswrapper[4910]: I1125 21:46:11.999912 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.101585 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.101638 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-scripts\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.101669 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-config\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.101707 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.101739 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.101764 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.101785 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-755sr\" (UniqueName: \"kubernetes.io/projected/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-kube-api-access-755sr\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.102390 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.103002 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-scripts\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.104443 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-config\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.110060 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.110221 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.112807 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.125483 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-755sr\" (UniqueName: \"kubernetes.io/projected/944fb5f5-a2bc-4328-bbec-203fbfb6cd20-kube-api-access-755sr\") pod \"ovn-northd-0\" (UID: \"944fb5f5-a2bc-4328-bbec-203fbfb6cd20\") " pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.256279 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 25 21:46:12 crc kubenswrapper[4910]: I1125 21:46:12.782676 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 25 21:46:13 crc kubenswrapper[4910]: I1125 21:46:13.178526 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 25 21:46:13 crc kubenswrapper[4910]: I1125 21:46:13.178614 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 25 21:46:13 crc kubenswrapper[4910]: I1125 21:46:13.509772 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"944fb5f5-a2bc-4328-bbec-203fbfb6cd20","Type":"ContainerStarted","Data":"0ab6389f3ae180cea48a256f97d0eb05bf61c586405eb4a0f4d2bc1154c0a5dc"} Nov 25 21:46:14 crc kubenswrapper[4910]: I1125 21:46:14.268771 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:14 crc kubenswrapper[4910]: I1125 21:46:14.529602 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:14 crc kubenswrapper[4910]: I1125 21:46:14.602679 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-ncqdt"] Nov 25 21:46:14 crc kubenswrapper[4910]: I1125 21:46:14.602948 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" podUID="55241105-f5fd-43b7-bb49-14bc09d3fb9c" containerName="dnsmasq-dns" containerID="cri-o://84da03a65976b1fd4476db7de84b1dbb6e526deb10b8bdbeefbb8e949fb27007" gracePeriod=10 Nov 25 21:46:14 crc kubenswrapper[4910]: I1125 21:46:14.650492 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 25 21:46:14 crc kubenswrapper[4910]: I1125 21:46:14.651163 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 25 21:46:15 crc kubenswrapper[4910]: I1125 21:46:15.044121 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Nov 25 21:46:15 crc kubenswrapper[4910]: I1125 21:46:15.533314 4910 generic.go:334] "Generic (PLEG): container finished" podID="55241105-f5fd-43b7-bb49-14bc09d3fb9c" containerID="84da03a65976b1fd4476db7de84b1dbb6e526deb10b8bdbeefbb8e949fb27007" exitCode=0 Nov 25 21:46:15 crc kubenswrapper[4910]: I1125 21:46:15.533354 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" event={"ID":"55241105-f5fd-43b7-bb49-14bc09d3fb9c","Type":"ContainerDied","Data":"84da03a65976b1fd4476db7de84b1dbb6e526deb10b8bdbeefbb8e949fb27007"} Nov 25 21:46:15 crc kubenswrapper[4910]: I1125 21:46:15.908016 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.010485 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.780413 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-h8756"] Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.782734 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.796801 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-h8756"] Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.835717 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-config\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.835831 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9w9q\" (UniqueName: \"kubernetes.io/projected/de327dc7-6e50-4eb8-bc62-bfe861d55d45-kube-api-access-h9w9q\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.835878 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.835948 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.835984 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.878213 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.939957 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-config\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.940153 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9w9q\" (UniqueName: \"kubernetes.io/projected/de327dc7-6e50-4eb8-bc62-bfe861d55d45-kube-api-access-h9w9q\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.940216 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " 
pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.940420 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.940454 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.941119 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-config\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.941353 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.941956 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.942287 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:16 crc kubenswrapper[4910]: I1125 21:46:16.996554 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9w9q\" (UniqueName: \"kubernetes.io/projected/de327dc7-6e50-4eb8-bc62-bfe861d55d45-kube-api-access-h9w9q\") pod \"dnsmasq-dns-b8fbc5445-h8756\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.149960 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.458531 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.550344 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9xcw\" (UniqueName: \"kubernetes.io/projected/55241105-f5fd-43b7-bb49-14bc09d3fb9c-kube-api-access-c9xcw\") pod \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.550822 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-dns-svc\") pod \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.550992 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-ovsdbserver-sb\") pod \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.551027 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-config\") pod \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\" (UID: \"55241105-f5fd-43b7-bb49-14bc09d3fb9c\") " Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.553925 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" event={"ID":"55241105-f5fd-43b7-bb49-14bc09d3fb9c","Type":"ContainerDied","Data":"b57d50a0844491c03d237799518b148f34446c5037924558adf490ff31f5b056"} Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.553975 4910 scope.go:117] "RemoveContainer" containerID="84da03a65976b1fd4476db7de84b1dbb6e526deb10b8bdbeefbb8e949fb27007" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.553980 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc7876d45-ncqdt" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.558581 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55241105-f5fd-43b7-bb49-14bc09d3fb9c-kube-api-access-c9xcw" (OuterVolumeSpecName: "kube-api-access-c9xcw") pod "55241105-f5fd-43b7-bb49-14bc09d3fb9c" (UID: "55241105-f5fd-43b7-bb49-14bc09d3fb9c"). InnerVolumeSpecName "kube-api-access-c9xcw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.587905 4910 scope.go:117] "RemoveContainer" containerID="c800ef8edad75c29c6f707365e9e1d376ee766c02d73c65763f88851f8571be6" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.606861 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-config" (OuterVolumeSpecName: "config") pod "55241105-f5fd-43b7-bb49-14bc09d3fb9c" (UID: "55241105-f5fd-43b7-bb49-14bc09d3fb9c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.607118 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "55241105-f5fd-43b7-bb49-14bc09d3fb9c" (UID: "55241105-f5fd-43b7-bb49-14bc09d3fb9c"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.609095 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "55241105-f5fd-43b7-bb49-14bc09d3fb9c" (UID: "55241105-f5fd-43b7-bb49-14bc09d3fb9c"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.652960 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.652996 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.653009 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9xcw\" (UniqueName: \"kubernetes.io/projected/55241105-f5fd-43b7-bb49-14bc09d3fb9c-kube-api-access-c9xcw\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.653022 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/55241105-f5fd-43b7-bb49-14bc09d3fb9c-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.752716 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-h8756"] Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.902229 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 25 21:46:17 crc kubenswrapper[4910]: E1125 21:46:17.902733 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55241105-f5fd-43b7-bb49-14bc09d3fb9c" containerName="dnsmasq-dns" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.902752 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="55241105-f5fd-43b7-bb49-14bc09d3fb9c" containerName="dnsmasq-dns" Nov 25 21:46:17 crc kubenswrapper[4910]: E1125 21:46:17.902776 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55241105-f5fd-43b7-bb49-14bc09d3fb9c" containerName="init" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.902785 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="55241105-f5fd-43b7-bb49-14bc09d3fb9c" containerName="init" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.903087 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="55241105-f5fd-43b7-bb49-14bc09d3fb9c" containerName="dnsmasq-dns" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.915736 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.917223 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-ncqdt"] Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.922941 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-z2wwz" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.925612 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.925801 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.926303 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bc7876d45-ncqdt"] Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.931553 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 25 21:46:17 crc kubenswrapper[4910]: I1125 21:46:17.935542 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.061500 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glsf5\" (UniqueName: \"kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-kube-api-access-glsf5\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.061806 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/df4c228a-b3ae-4de6-bd0b-a761692c4476-lock\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.061831 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.061878 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/df4c228a-b3ae-4de6-bd0b-a761692c4476-cache\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.061938 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: E1125 21:46:18.094752 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55241105_f5fd_43b7_bb49_14bc09d3fb9c.slice\": RecentStats: unable to find data in memory cache], 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55241105_f5fd_43b7_bb49_14bc09d3fb9c.slice/crio-b57d50a0844491c03d237799518b148f34446c5037924558adf490ff31f5b056\": RecentStats: unable to find data in memory cache]" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.163498 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.163625 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glsf5\" (UniqueName: \"kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-kube-api-access-glsf5\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.163658 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/df4c228a-b3ae-4de6-bd0b-a761692c4476-lock\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: E1125 21:46:18.163686 4910 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 21:46:18 crc kubenswrapper[4910]: E1125 21:46:18.163706 4910 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 21:46:18 crc kubenswrapper[4910]: E1125 21:46:18.163762 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift podName:df4c228a-b3ae-4de6-bd0b-a761692c4476 nodeName:}" failed. No retries permitted until 2025-11-25 21:46:18.663743396 +0000 UTC m=+934.126219718 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift") pod "swift-storage-0" (UID: "df4c228a-b3ae-4de6-bd0b-a761692c4476") : configmap "swift-ring-files" not found Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.163689 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.163956 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/df4c228a-b3ae-4de6-bd0b-a761692c4476-cache\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.164053 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.164378 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/df4c228a-b3ae-4de6-bd0b-a761692c4476-cache\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.164454 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/df4c228a-b3ae-4de6-bd0b-a761692c4476-lock\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.185146 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glsf5\" (UniqueName: \"kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-kube-api-access-glsf5\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.186517 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.569039 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"944fb5f5-a2bc-4328-bbec-203fbfb6cd20","Type":"ContainerStarted","Data":"22fa8fb29d81a4c9f3a9e92ec6c58b3a041b88bd05381f346b5a785c1085d1f9"} Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.569116 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"944fb5f5-a2bc-4328-bbec-203fbfb6cd20","Type":"ContainerStarted","Data":"9ed578f403cb2cc1c549fd3b84b15d322522129c31fdfba3807501e13c558de3"} Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.569142 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.570785 4910 generic.go:334] "Generic (PLEG): container 
finished" podID="de327dc7-6e50-4eb8-bc62-bfe861d55d45" containerID="057825e17ca858c077568f1a0b04e932412a79ee6f8a5d8fce9e5c7fca4079f5" exitCode=0 Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.570895 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" event={"ID":"de327dc7-6e50-4eb8-bc62-bfe861d55d45","Type":"ContainerDied","Data":"057825e17ca858c077568f1a0b04e932412a79ee6f8a5d8fce9e5c7fca4079f5"} Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.570956 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" event={"ID":"de327dc7-6e50-4eb8-bc62-bfe861d55d45","Type":"ContainerStarted","Data":"e6fe552b94badc5425cd43e2407d67a29b037d681b002fc536c55df8dad2c748"} Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.617462 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.068744073 podStartE2EDuration="7.617435843s" podCreationTimestamp="2025-11-25 21:46:11 +0000 UTC" firstStartedPulling="2025-11-25 21:46:12.789218636 +0000 UTC m=+928.251694958" lastFinishedPulling="2025-11-25 21:46:17.337910406 +0000 UTC m=+932.800386728" observedRunningTime="2025-11-25 21:46:18.598396398 +0000 UTC m=+934.060872720" watchObservedRunningTime="2025-11-25 21:46:18.617435843 +0000 UTC m=+934.079912165" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.672521 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:18 crc kubenswrapper[4910]: E1125 21:46:18.673470 4910 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 21:46:18 crc kubenswrapper[4910]: E1125 21:46:18.673518 4910 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 21:46:18 crc kubenswrapper[4910]: E1125 21:46:18.673586 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift podName:df4c228a-b3ae-4de6-bd0b-a761692c4476 nodeName:}" failed. No retries permitted until 2025-11-25 21:46:19.673554152 +0000 UTC m=+935.136030474 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift") pod "swift-storage-0" (UID: "df4c228a-b3ae-4de6-bd0b-a761692c4476") : configmap "swift-ring-files" not found Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.777975 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 25 21:46:18 crc kubenswrapper[4910]: I1125 21:46:18.865229 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 25 21:46:19 crc kubenswrapper[4910]: I1125 21:46:19.218148 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="55241105-f5fd-43b7-bb49-14bc09d3fb9c" path="/var/lib/kubelet/pods/55241105-f5fd-43b7-bb49-14bc09d3fb9c/volumes" Nov 25 21:46:19 crc kubenswrapper[4910]: I1125 21:46:19.584370 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" event={"ID":"de327dc7-6e50-4eb8-bc62-bfe861d55d45","Type":"ContainerStarted","Data":"895859d9d08b4c52a4826f7ebd3b17a15002856be9890110f021a91ed79c95d9"} Nov 25 21:46:19 crc kubenswrapper[4910]: I1125 21:46:19.606656 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" podStartSLOduration=3.6066315060000003 podStartE2EDuration="3.606631506s" podCreationTimestamp="2025-11-25 21:46:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:46:19.600801041 +0000 UTC m=+935.063277363" watchObservedRunningTime="2025-11-25 21:46:19.606631506 +0000 UTC m=+935.069107828" Nov 25 21:46:19 crc kubenswrapper[4910]: I1125 21:46:19.692191 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:19 crc kubenswrapper[4910]: E1125 21:46:19.692365 4910 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 21:46:19 crc kubenswrapper[4910]: E1125 21:46:19.692394 4910 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 21:46:19 crc kubenswrapper[4910]: E1125 21:46:19.692445 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift podName:df4c228a-b3ae-4de6-bd0b-a761692c4476 nodeName:}" failed. No retries permitted until 2025-11-25 21:46:21.692426942 +0000 UTC m=+937.154903264 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift") pod "swift-storage-0" (UID: "df4c228a-b3ae-4de6-bd0b-a761692c4476") : configmap "swift-ring-files" not found Nov 25 21:46:20 crc kubenswrapper[4910]: I1125 21:46:20.591023 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.728799 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:21 crc kubenswrapper[4910]: E1125 21:46:21.729594 4910 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 21:46:21 crc kubenswrapper[4910]: E1125 21:46:21.729617 4910 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 21:46:21 crc kubenswrapper[4910]: E1125 21:46:21.729665 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift podName:df4c228a-b3ae-4de6-bd0b-a761692c4476 nodeName:}" failed. No retries permitted until 2025-11-25 21:46:25.729647171 +0000 UTC m=+941.192123493 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift") pod "swift-storage-0" (UID: "df4c228a-b3ae-4de6-bd0b-a761692c4476") : configmap "swift-ring-files" not found Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.857576 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-d6nbg"] Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.859166 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.878774 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.882981 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.883772 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.890555 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-d6nbg"] Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.918443 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-v7clx"] Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.919739 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.934668 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-ring-data-devices\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.934727 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-dispersionconf\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.934768 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-etc-swift\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.934903 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-swiftconf\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.935114 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-scripts\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.935406 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9nqx\" (UniqueName: \"kubernetes.io/projected/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-kube-api-access-c9nqx\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.935446 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-combined-ca-bundle\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.943659 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-v7clx"] Nov 25 21:46:21 crc kubenswrapper[4910]: I1125 21:46:21.949984 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-d6nbg"] Nov 25 21:46:21 crc kubenswrapper[4910]: E1125 21:46:21.950402 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle dispersionconf etc-swift kube-api-access-c9nqx ring-data-devices scripts swiftconf], unattached volumes=[], failed to process volumes=[]: context canceled" 
pod="openstack/swift-ring-rebalance-d6nbg" podUID="ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.036859 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-scripts\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.036919 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-combined-ca-bundle\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.036945 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-swiftconf\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.036969 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-ring-data-devices\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.036987 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9nqx\" (UniqueName: \"kubernetes.io/projected/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-kube-api-access-c9nqx\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.037004 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-combined-ca-bundle\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.037025 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-dispersionconf\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.037040 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7q4s\" (UniqueName: \"kubernetes.io/projected/5050ee25-88de-4888-ba01-fc11c71df0a1-kube-api-access-w7q4s\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.037058 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-ring-data-devices\") pod 
\"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.037073 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5050ee25-88de-4888-ba01-fc11c71df0a1-etc-swift\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.037099 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-dispersionconf\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.037116 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-scripts\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.037141 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-etc-swift\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.037158 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-swiftconf\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.038143 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-etc-swift\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.038197 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-ring-data-devices\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.038431 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-scripts\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.041894 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-swiftconf\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: 
I1125 21:46:22.042016 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-dispersionconf\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.043179 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-combined-ca-bundle\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.054921 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9nqx\" (UniqueName: \"kubernetes.io/projected/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-kube-api-access-c9nqx\") pod \"swift-ring-rebalance-d6nbg\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.140022 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-combined-ca-bundle\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.140073 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-swiftconf\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.140124 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-ring-data-devices\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.140151 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-dispersionconf\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.140174 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7q4s\" (UniqueName: \"kubernetes.io/projected/5050ee25-88de-4888-ba01-fc11c71df0a1-kube-api-access-w7q4s\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.140201 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5050ee25-88de-4888-ba01-fc11c71df0a1-etc-swift\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.140266 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-scripts\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.141217 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-ring-data-devices\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.141473 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-scripts\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.141661 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5050ee25-88de-4888-ba01-fc11c71df0a1-etc-swift\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.144092 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-dispersionconf\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.144279 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-combined-ca-bundle\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.144827 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-swiftconf\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.157857 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7q4s\" (UniqueName: \"kubernetes.io/projected/5050ee25-88de-4888-ba01-fc11c71df0a1-kube-api-access-w7q4s\") pod \"swift-ring-rebalance-v7clx\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.248821 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.607806 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.618591 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.652803 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-v7clx"] Nov 25 21:46:22 crc kubenswrapper[4910]: W1125 21:46:22.653791 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5050ee25_88de_4888_ba01_fc11c71df0a1.slice/crio-b5ffa97a78f1abc3e0e6c84ce2c611f6d50cdd9a2d0cec587abd9da12b0bbcfd WatchSource:0}: Error finding container b5ffa97a78f1abc3e0e6c84ce2c611f6d50cdd9a2d0cec587abd9da12b0bbcfd: Status 404 returned error can't find the container with id b5ffa97a78f1abc3e0e6c84ce2c611f6d50cdd9a2d0cec587abd9da12b0bbcfd Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.750611 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-etc-swift\") pod \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.750715 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-ring-data-devices\") pod \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.750816 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-swiftconf\") pod \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.750873 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-dispersionconf\") pod \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.750948 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a" (UID: "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.750966 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9nqx\" (UniqueName: \"kubernetes.io/projected/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-kube-api-access-c9nqx\") pod \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.751142 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-scripts\") pod \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.751228 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-combined-ca-bundle\") pod \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\" (UID: \"ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a\") " Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.751284 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a" (UID: "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.751657 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-scripts" (OuterVolumeSpecName: "scripts") pod "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a" (UID: "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.751864 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.751886 4910 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.751896 4910 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.755544 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a" (UID: "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a"). InnerVolumeSpecName "dispersionconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.755678 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-kube-api-access-c9nqx" (OuterVolumeSpecName: "kube-api-access-c9nqx") pod "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a" (UID: "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a"). InnerVolumeSpecName "kube-api-access-c9nqx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.756357 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a" (UID: "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.757193 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a" (UID: "ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.853614 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9nqx\" (UniqueName: \"kubernetes.io/projected/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-kube-api-access-c9nqx\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.853661 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.853671 4910 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:22 crc kubenswrapper[4910]: I1125 21:46:22.853680 4910 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.099107 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.099452 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.099508 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.100382 4910 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8368a57726af5a6b75ce9b9efb9fa3828db0cba5637cfb1aba6ea91ccf50acb2"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.100443 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://8368a57726af5a6b75ce9b9efb9fa3828db0cba5637cfb1aba6ea91ccf50acb2" gracePeriod=600 Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.620932 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="8368a57726af5a6b75ce9b9efb9fa3828db0cba5637cfb1aba6ea91ccf50acb2" exitCode=0 Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.621005 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"8368a57726af5a6b75ce9b9efb9fa3828db0cba5637cfb1aba6ea91ccf50acb2"} Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.621035 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"0b0066f1a169222bc8e764ac54716c8dcd57922f8eb880531d5e609e43cc685c"} Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.621052 4910 scope.go:117] "RemoveContainer" containerID="1d449a51cc6d0f8601906171d97e528f4369d984db9458b4317c75e761fb730e" Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.626569 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v7clx" event={"ID":"5050ee25-88de-4888-ba01-fc11c71df0a1","Type":"ContainerStarted","Data":"b5ffa97a78f1abc3e0e6c84ce2c611f6d50cdd9a2d0cec587abd9da12b0bbcfd"} Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.626604 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-d6nbg" Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.695592 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-d6nbg"] Nov 25 21:46:23 crc kubenswrapper[4910]: I1125 21:46:23.707922 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-d6nbg"] Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.401935 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-a73a-account-create-update-96g2p"] Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.403422 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a73a-account-create-update-96g2p" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.405935 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.412850 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-bf9cj"] Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.414171 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-bf9cj" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.421919 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-a73a-account-create-update-96g2p"] Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.431136 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-bf9cj"] Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.485962 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/060d8a13-8904-4f0b-967b-108554dec9e7-operator-scripts\") pod \"keystone-db-create-bf9cj\" (UID: \"060d8a13-8904-4f0b-967b-108554dec9e7\") " pod="openstack/keystone-db-create-bf9cj" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.486013 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9m855\" (UniqueName: \"kubernetes.io/projected/060d8a13-8904-4f0b-967b-108554dec9e7-kube-api-access-9m855\") pod \"keystone-db-create-bf9cj\" (UID: \"060d8a13-8904-4f0b-967b-108554dec9e7\") " pod="openstack/keystone-db-create-bf9cj" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.486073 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k7dvp\" (UniqueName: \"kubernetes.io/projected/e7dabc19-8625-4a01-9ad5-f0370ec7d608-kube-api-access-k7dvp\") pod \"keystone-a73a-account-create-update-96g2p\" (UID: \"e7dabc19-8625-4a01-9ad5-f0370ec7d608\") " pod="openstack/keystone-a73a-account-create-update-96g2p" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.486143 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7dabc19-8625-4a01-9ad5-f0370ec7d608-operator-scripts\") pod \"keystone-a73a-account-create-update-96g2p\" (UID: \"e7dabc19-8625-4a01-9ad5-f0370ec7d608\") " pod="openstack/keystone-a73a-account-create-update-96g2p" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.587418 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/060d8a13-8904-4f0b-967b-108554dec9e7-operator-scripts\") pod \"keystone-db-create-bf9cj\" (UID: \"060d8a13-8904-4f0b-967b-108554dec9e7\") " pod="openstack/keystone-db-create-bf9cj" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.587475 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9m855\" (UniqueName: \"kubernetes.io/projected/060d8a13-8904-4f0b-967b-108554dec9e7-kube-api-access-9m855\") pod \"keystone-db-create-bf9cj\" (UID: \"060d8a13-8904-4f0b-967b-108554dec9e7\") " pod="openstack/keystone-db-create-bf9cj" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.587516 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k7dvp\" (UniqueName: \"kubernetes.io/projected/e7dabc19-8625-4a01-9ad5-f0370ec7d608-kube-api-access-k7dvp\") pod \"keystone-a73a-account-create-update-96g2p\" (UID: \"e7dabc19-8625-4a01-9ad5-f0370ec7d608\") " pod="openstack/keystone-a73a-account-create-update-96g2p" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.587588 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/e7dabc19-8625-4a01-9ad5-f0370ec7d608-operator-scripts\") pod \"keystone-a73a-account-create-update-96g2p\" (UID: \"e7dabc19-8625-4a01-9ad5-f0370ec7d608\") " pod="openstack/keystone-a73a-account-create-update-96g2p" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.588789 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7dabc19-8625-4a01-9ad5-f0370ec7d608-operator-scripts\") pod \"keystone-a73a-account-create-update-96g2p\" (UID: \"e7dabc19-8625-4a01-9ad5-f0370ec7d608\") " pod="openstack/keystone-a73a-account-create-update-96g2p" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.590209 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/060d8a13-8904-4f0b-967b-108554dec9e7-operator-scripts\") pod \"keystone-db-create-bf9cj\" (UID: \"060d8a13-8904-4f0b-967b-108554dec9e7\") " pod="openstack/keystone-db-create-bf9cj" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.611223 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9m855\" (UniqueName: \"kubernetes.io/projected/060d8a13-8904-4f0b-967b-108554dec9e7-kube-api-access-9m855\") pod \"keystone-db-create-bf9cj\" (UID: \"060d8a13-8904-4f0b-967b-108554dec9e7\") " pod="openstack/keystone-db-create-bf9cj" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.621138 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k7dvp\" (UniqueName: \"kubernetes.io/projected/e7dabc19-8625-4a01-9ad5-f0370ec7d608-kube-api-access-k7dvp\") pod \"keystone-a73a-account-create-update-96g2p\" (UID: \"e7dabc19-8625-4a01-9ad5-f0370ec7d608\") " pod="openstack/keystone-a73a-account-create-update-96g2p" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.680850 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-fbfdb"] Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.684940 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-fbfdb" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.695402 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-fbfdb"] Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.726431 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a73a-account-create-update-96g2p" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.732170 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-bf9cj" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.791299 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e02073f-e958-48a0-8bf6-8e9959924424-operator-scripts\") pod \"placement-db-create-fbfdb\" (UID: \"8e02073f-e958-48a0-8bf6-8e9959924424\") " pod="openstack/placement-db-create-fbfdb" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.791445 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz45l\" (UniqueName: \"kubernetes.io/projected/8e02073f-e958-48a0-8bf6-8e9959924424-kube-api-access-tz45l\") pod \"placement-db-create-fbfdb\" (UID: \"8e02073f-e958-48a0-8bf6-8e9959924424\") " pod="openstack/placement-db-create-fbfdb" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.796634 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-21bb-account-create-update-7plzn"] Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.798103 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-21bb-account-create-update-7plzn" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.800313 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.808994 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-21bb-account-create-update-7plzn"] Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.892813 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e02073f-e958-48a0-8bf6-8e9959924424-operator-scripts\") pod \"placement-db-create-fbfdb\" (UID: \"8e02073f-e958-48a0-8bf6-8e9959924424\") " pod="openstack/placement-db-create-fbfdb" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.892860 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz45l\" (UniqueName: \"kubernetes.io/projected/8e02073f-e958-48a0-8bf6-8e9959924424-kube-api-access-tz45l\") pod \"placement-db-create-fbfdb\" (UID: \"8e02073f-e958-48a0-8bf6-8e9959924424\") " pod="openstack/placement-db-create-fbfdb" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.892893 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wchq7\" (UniqueName: \"kubernetes.io/projected/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-kube-api-access-wchq7\") pod \"placement-21bb-account-create-update-7plzn\" (UID: \"d01ae6d0-c74e-49fc-9681-0d6738b0f92b\") " pod="openstack/placement-21bb-account-create-update-7plzn" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.892928 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-operator-scripts\") pod \"placement-21bb-account-create-update-7plzn\" (UID: \"d01ae6d0-c74e-49fc-9681-0d6738b0f92b\") " pod="openstack/placement-21bb-account-create-update-7plzn" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.893590 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e02073f-e958-48a0-8bf6-8e9959924424-operator-scripts\") pod 
\"placement-db-create-fbfdb\" (UID: \"8e02073f-e958-48a0-8bf6-8e9959924424\") " pod="openstack/placement-db-create-fbfdb" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.920155 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tz45l\" (UniqueName: \"kubernetes.io/projected/8e02073f-e958-48a0-8bf6-8e9959924424-kube-api-access-tz45l\") pod \"placement-db-create-fbfdb\" (UID: \"8e02073f-e958-48a0-8bf6-8e9959924424\") " pod="openstack/placement-db-create-fbfdb" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.994529 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wchq7\" (UniqueName: \"kubernetes.io/projected/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-kube-api-access-wchq7\") pod \"placement-21bb-account-create-update-7plzn\" (UID: \"d01ae6d0-c74e-49fc-9681-0d6738b0f92b\") " pod="openstack/placement-21bb-account-create-update-7plzn" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.994590 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-operator-scripts\") pod \"placement-21bb-account-create-update-7plzn\" (UID: \"d01ae6d0-c74e-49fc-9681-0d6738b0f92b\") " pod="openstack/placement-21bb-account-create-update-7plzn" Nov 25 21:46:24 crc kubenswrapper[4910]: I1125 21:46:24.995334 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-operator-scripts\") pod \"placement-21bb-account-create-update-7plzn\" (UID: \"d01ae6d0-c74e-49fc-9681-0d6738b0f92b\") " pod="openstack/placement-21bb-account-create-update-7plzn" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.010005 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wchq7\" (UniqueName: \"kubernetes.io/projected/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-kube-api-access-wchq7\") pod \"placement-21bb-account-create-update-7plzn\" (UID: \"d01ae6d0-c74e-49fc-9681-0d6738b0f92b\") " pod="openstack/placement-21bb-account-create-update-7plzn" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.012944 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-fbfdb" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.119038 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-21bb-account-create-update-7plzn" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.214652 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a" path="/var/lib/kubelet/pods/ed330e31-4b50-4e76-8a3c-3fbbdd9c4a6a/volumes" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.216194 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-nvnbv"] Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.218013 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-nvnbv" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.221914 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-nvnbv"] Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.300339 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvllq\" (UniqueName: \"kubernetes.io/projected/afe4e40b-44ed-477b-a1d8-b9b973042e11-kube-api-access-jvllq\") pod \"glance-db-create-nvnbv\" (UID: \"afe4e40b-44ed-477b-a1d8-b9b973042e11\") " pod="openstack/glance-db-create-nvnbv" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.300524 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/afe4e40b-44ed-477b-a1d8-b9b973042e11-operator-scripts\") pod \"glance-db-create-nvnbv\" (UID: \"afe4e40b-44ed-477b-a1d8-b9b973042e11\") " pod="openstack/glance-db-create-nvnbv" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.302518 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-89b5-account-create-update-h744x"] Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.303913 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-89b5-account-create-update-h744x" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.310078 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.314780 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-89b5-account-create-update-h744x"] Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.402478 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-operator-scripts\") pod \"glance-89b5-account-create-update-h744x\" (UID: \"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3\") " pod="openstack/glance-89b5-account-create-update-h744x" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.402600 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvllq\" (UniqueName: \"kubernetes.io/projected/afe4e40b-44ed-477b-a1d8-b9b973042e11-kube-api-access-jvllq\") pod \"glance-db-create-nvnbv\" (UID: \"afe4e40b-44ed-477b-a1d8-b9b973042e11\") " pod="openstack/glance-db-create-nvnbv" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.402643 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqkj7\" (UniqueName: \"kubernetes.io/projected/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-kube-api-access-kqkj7\") pod \"glance-89b5-account-create-update-h744x\" (UID: \"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3\") " pod="openstack/glance-89b5-account-create-update-h744x" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.402759 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/afe4e40b-44ed-477b-a1d8-b9b973042e11-operator-scripts\") pod \"glance-db-create-nvnbv\" (UID: \"afe4e40b-44ed-477b-a1d8-b9b973042e11\") " pod="openstack/glance-db-create-nvnbv" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.405307 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/afe4e40b-44ed-477b-a1d8-b9b973042e11-operator-scripts\") pod \"glance-db-create-nvnbv\" (UID: \"afe4e40b-44ed-477b-a1d8-b9b973042e11\") " pod="openstack/glance-db-create-nvnbv" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.418252 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvllq\" (UniqueName: \"kubernetes.io/projected/afe4e40b-44ed-477b-a1d8-b9b973042e11-kube-api-access-jvllq\") pod \"glance-db-create-nvnbv\" (UID: \"afe4e40b-44ed-477b-a1d8-b9b973042e11\") " pod="openstack/glance-db-create-nvnbv" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.505135 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-operator-scripts\") pod \"glance-89b5-account-create-update-h744x\" (UID: \"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3\") " pod="openstack/glance-89b5-account-create-update-h744x" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.505231 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqkj7\" (UniqueName: \"kubernetes.io/projected/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-kube-api-access-kqkj7\") pod \"glance-89b5-account-create-update-h744x\" (UID: \"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3\") " pod="openstack/glance-89b5-account-create-update-h744x" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.506071 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-operator-scripts\") pod \"glance-89b5-account-create-update-h744x\" (UID: \"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3\") " pod="openstack/glance-89b5-account-create-update-h744x" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.521109 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqkj7\" (UniqueName: \"kubernetes.io/projected/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-kube-api-access-kqkj7\") pod \"glance-89b5-account-create-update-h744x\" (UID: \"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3\") " pod="openstack/glance-89b5-account-create-update-h744x" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.539543 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-nvnbv" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.619133 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-89b5-account-create-update-h744x" Nov 25 21:46:25 crc kubenswrapper[4910]: I1125 21:46:25.809825 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:25 crc kubenswrapper[4910]: E1125 21:46:25.809989 4910 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 25 21:46:25 crc kubenswrapper[4910]: E1125 21:46:25.810008 4910 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 25 21:46:25 crc kubenswrapper[4910]: E1125 21:46:25.810057 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift podName:df4c228a-b3ae-4de6-bd0b-a761692c4476 nodeName:}" failed. No retries permitted until 2025-11-25 21:46:33.810039447 +0000 UTC m=+949.272515769 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift") pod "swift-storage-0" (UID: "df4c228a-b3ae-4de6-bd0b-a761692c4476") : configmap "swift-ring-files" not found Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.350668 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-21bb-account-create-update-7plzn"] Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.441271 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-bf9cj"] Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.451319 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-nvnbv"] Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.465747 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-a73a-account-create-update-96g2p"] Nov 25 21:46:26 crc kubenswrapper[4910]: W1125 21:46:26.470025 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode7dabc19_8625_4a01_9ad5_f0370ec7d608.slice/crio-e78244e50849885faea503d8a96db58811271ec17b14e319ef0d17d929056e40 WatchSource:0}: Error finding container e78244e50849885faea503d8a96db58811271ec17b14e319ef0d17d929056e40: Status 404 returned error can't find the container with id e78244e50849885faea503d8a96db58811271ec17b14e319ef0d17d929056e40 Nov 25 21:46:26 crc kubenswrapper[4910]: W1125 21:46:26.471433 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podafe4e40b_44ed_477b_a1d8_b9b973042e11.slice/crio-10d8cc78a2ab96759eb342f382b813b7797d62b4d2e218b04c42ef72d6047fe7 WatchSource:0}: Error finding container 10d8cc78a2ab96759eb342f382b813b7797d62b4d2e218b04c42ef72d6047fe7: Status 404 returned error can't find the container with id 10d8cc78a2ab96759eb342f382b813b7797d62b4d2e218b04c42ef72d6047fe7 Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.638394 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-fbfdb"] Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.656798 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-89b5-account-create-update-h744x"] Nov 25 21:46:26 
crc kubenswrapper[4910]: I1125 21:46:26.661323 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-fbfdb" event={"ID":"8e02073f-e958-48a0-8bf6-8e9959924424","Type":"ContainerStarted","Data":"bbed4c175d25c5a4d8512c55139cc025ea9d00cca191b3d5c2b14dbd76407f9f"} Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.668074 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-21bb-account-create-update-7plzn" event={"ID":"d01ae6d0-c74e-49fc-9681-0d6738b0f92b","Type":"ContainerStarted","Data":"6f3b9e563c55778a3e4df96b6b499c37bb8d1813e13545b12f847ce499f4bf72"} Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.668123 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-21bb-account-create-update-7plzn" event={"ID":"d01ae6d0-c74e-49fc-9681-0d6738b0f92b","Type":"ContainerStarted","Data":"3af467cd29385985bab40d06c068971e6ba70e5e02aebad6b809202acdfe46c6"} Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.681933 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a73a-account-create-update-96g2p" event={"ID":"e7dabc19-8625-4a01-9ad5-f0370ec7d608","Type":"ContainerStarted","Data":"e78244e50849885faea503d8a96db58811271ec17b14e319ef0d17d929056e40"} Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.689763 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-nvnbv" event={"ID":"afe4e40b-44ed-477b-a1d8-b9b973042e11","Type":"ContainerStarted","Data":"10d8cc78a2ab96759eb342f382b813b7797d62b4d2e218b04c42ef72d6047fe7"} Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.694322 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v7clx" event={"ID":"5050ee25-88de-4888-ba01-fc11c71df0a1","Type":"ContainerStarted","Data":"71d7685befad390b42ac10bc58b1fd4d0b5c4f9af15b82c29162a5b89a0a3673"} Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.695889 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-bf9cj" event={"ID":"060d8a13-8904-4f0b-967b-108554dec9e7","Type":"ContainerStarted","Data":"86bdf28e2fa227611c7c7e11f64a313049a7fc1a478f1102385558f88ac9a716"} Nov 25 21:46:26 crc kubenswrapper[4910]: I1125 21:46:26.700772 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-21bb-account-create-update-7plzn" podStartSLOduration=2.700744957 podStartE2EDuration="2.700744957s" podCreationTimestamp="2025-11-25 21:46:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:46:26.690107475 +0000 UTC m=+942.152583797" watchObservedRunningTime="2025-11-25 21:46:26.700744957 +0000 UTC m=+942.163221279" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.152082 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.180645 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-v7clx" podStartSLOduration=2.903517724 podStartE2EDuration="6.180623708s" podCreationTimestamp="2025-11-25 21:46:21 +0000 UTC" firstStartedPulling="2025-11-25 21:46:22.656383408 +0000 UTC m=+938.118859750" lastFinishedPulling="2025-11-25 21:46:25.933489412 +0000 UTC m=+941.395965734" observedRunningTime="2025-11-25 21:46:26.719980117 +0000 UTC m=+942.182456439" watchObservedRunningTime="2025-11-25 
21:46:27.180623708 +0000 UTC m=+942.643100030" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.223908 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-fmm6b"] Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.224156 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-fmm6b" podUID="07068c0b-39ba-423b-b6f6-5f68f568a6fe" containerName="dnsmasq-dns" containerID="cri-o://e232ffbaf837378206e41067e3de37b088fac4e91500257e7ba0ca8131633841" gracePeriod=10 Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.326142 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.705900 4910 generic.go:334] "Generic (PLEG): container finished" podID="8e02073f-e958-48a0-8bf6-8e9959924424" containerID="cd16da346a97bc1763b4cc7a7a59cd128690a18d42b7519b103b301f7023350a" exitCode=0 Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.706376 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-fbfdb" event={"ID":"8e02073f-e958-48a0-8bf6-8e9959924424","Type":"ContainerDied","Data":"cd16da346a97bc1763b4cc7a7a59cd128690a18d42b7519b103b301f7023350a"} Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.710194 4910 generic.go:334] "Generic (PLEG): container finished" podID="d01ae6d0-c74e-49fc-9681-0d6738b0f92b" containerID="6f3b9e563c55778a3e4df96b6b499c37bb8d1813e13545b12f847ce499f4bf72" exitCode=0 Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.710424 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-21bb-account-create-update-7plzn" event={"ID":"d01ae6d0-c74e-49fc-9681-0d6738b0f92b","Type":"ContainerDied","Data":"6f3b9e563c55778a3e4df96b6b499c37bb8d1813e13545b12f847ce499f4bf72"} Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.712154 4910 generic.go:334] "Generic (PLEG): container finished" podID="e7dabc19-8625-4a01-9ad5-f0370ec7d608" containerID="73145759e761c29ad7989016538415ec0e9ec4cb7b2b50ab24c7b4654ede52e7" exitCode=0 Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.712216 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a73a-account-create-update-96g2p" event={"ID":"e7dabc19-8625-4a01-9ad5-f0370ec7d608","Type":"ContainerDied","Data":"73145759e761c29ad7989016538415ec0e9ec4cb7b2b50ab24c7b4654ede52e7"} Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.713670 4910 generic.go:334] "Generic (PLEG): container finished" podID="6e32216a-8f9f-440a-9c2a-bba04eb3f0e3" containerID="a9da1698719a8f65a192e7da50cf4b8a2bd0117f9e5bddaf32d284a0150b925a" exitCode=0 Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.713706 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-89b5-account-create-update-h744x" event={"ID":"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3","Type":"ContainerDied","Data":"a9da1698719a8f65a192e7da50cf4b8a2bd0117f9e5bddaf32d284a0150b925a"} Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.713720 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-89b5-account-create-update-h744x" event={"ID":"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3","Type":"ContainerStarted","Data":"0d8387d835bc5b5b0266186cfa70b8b901d6575f2be20afafc82c36aec18ad6f"} Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.718871 4910 generic.go:334] "Generic (PLEG): container finished" podID="afe4e40b-44ed-477b-a1d8-b9b973042e11" 
containerID="cc9423bd17f7832ce59a7b28a0d1f93b64b559fe8bcada446624da78cba695a2" exitCode=0 Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.718912 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-nvnbv" event={"ID":"afe4e40b-44ed-477b-a1d8-b9b973042e11","Type":"ContainerDied","Data":"cc9423bd17f7832ce59a7b28a0d1f93b64b559fe8bcada446624da78cba695a2"} Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.729081 4910 generic.go:334] "Generic (PLEG): container finished" podID="07068c0b-39ba-423b-b6f6-5f68f568a6fe" containerID="e232ffbaf837378206e41067e3de37b088fac4e91500257e7ba0ca8131633841" exitCode=0 Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.729175 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-fmm6b" event={"ID":"07068c0b-39ba-423b-b6f6-5f68f568a6fe","Type":"ContainerDied","Data":"e232ffbaf837378206e41067e3de37b088fac4e91500257e7ba0ca8131633841"} Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.729205 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-fmm6b" event={"ID":"07068c0b-39ba-423b-b6f6-5f68f568a6fe","Type":"ContainerDied","Data":"b2952e010784bd2d8ada6180307bd21bd62f02d7fe1868386502f53c5e8b1789"} Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.729215 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b2952e010784bd2d8ada6180307bd21bd62f02d7fe1868386502f53c5e8b1789" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.732652 4910 generic.go:334] "Generic (PLEG): container finished" podID="060d8a13-8904-4f0b-967b-108554dec9e7" containerID="bea635cf438c7b7d19a6264b0b6718311338cf2765128c5a59caf7e363224bad" exitCode=0 Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.733502 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-bf9cj" event={"ID":"060d8a13-8904-4f0b-967b-108554dec9e7","Type":"ContainerDied","Data":"bea635cf438c7b7d19a6264b0b6718311338cf2765128c5a59caf7e363224bad"} Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.757862 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.862769 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-nb\") pod \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.863120 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-dns-svc\") pod \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.863443 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-sb\") pod \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.863490 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-config\") pod \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.863606 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vk5sn\" (UniqueName: \"kubernetes.io/projected/07068c0b-39ba-423b-b6f6-5f68f568a6fe-kube-api-access-vk5sn\") pod \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\" (UID: \"07068c0b-39ba-423b-b6f6-5f68f568a6fe\") " Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.882174 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07068c0b-39ba-423b-b6f6-5f68f568a6fe-kube-api-access-vk5sn" (OuterVolumeSpecName: "kube-api-access-vk5sn") pod "07068c0b-39ba-423b-b6f6-5f68f568a6fe" (UID: "07068c0b-39ba-423b-b6f6-5f68f568a6fe"). InnerVolumeSpecName "kube-api-access-vk5sn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.906805 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "07068c0b-39ba-423b-b6f6-5f68f568a6fe" (UID: "07068c0b-39ba-423b-b6f6-5f68f568a6fe"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.916790 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "07068c0b-39ba-423b-b6f6-5f68f568a6fe" (UID: "07068c0b-39ba-423b-b6f6-5f68f568a6fe"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.921309 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-config" (OuterVolumeSpecName: "config") pod "07068c0b-39ba-423b-b6f6-5f68f568a6fe" (UID: "07068c0b-39ba-423b-b6f6-5f68f568a6fe"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.924676 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "07068c0b-39ba-423b-b6f6-5f68f568a6fe" (UID: "07068c0b-39ba-423b-b6f6-5f68f568a6fe"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.970365 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.970444 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.970462 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.970474 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07068c0b-39ba-423b-b6f6-5f68f568a6fe-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:27 crc kubenswrapper[4910]: I1125 21:46:27.970486 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vk5sn\" (UniqueName: \"kubernetes.io/projected/07068c0b-39ba-423b-b6f6-5f68f568a6fe-kube-api-access-vk5sn\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:28 crc kubenswrapper[4910]: I1125 21:46:28.742691 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-fmm6b" Nov 25 21:46:28 crc kubenswrapper[4910]: I1125 21:46:28.810104 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-fmm6b"] Nov 25 21:46:28 crc kubenswrapper[4910]: I1125 21:46:28.819490 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-fmm6b"] Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.159642 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-89b5-account-create-update-h744x" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.219542 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07068c0b-39ba-423b-b6f6-5f68f568a6fe" path="/var/lib/kubelet/pods/07068c0b-39ba-423b-b6f6-5f68f568a6fe/volumes" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.291406 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-operator-scripts\") pod \"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3\" (UID: \"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.291514 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqkj7\" (UniqueName: \"kubernetes.io/projected/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-kube-api-access-kqkj7\") pod \"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3\" (UID: \"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.294067 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6e32216a-8f9f-440a-9c2a-bba04eb3f0e3" (UID: "6e32216a-8f9f-440a-9c2a-bba04eb3f0e3"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.300514 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-kube-api-access-kqkj7" (OuterVolumeSpecName: "kube-api-access-kqkj7") pod "6e32216a-8f9f-440a-9c2a-bba04eb3f0e3" (UID: "6e32216a-8f9f-440a-9c2a-bba04eb3f0e3"). InnerVolumeSpecName "kube-api-access-kqkj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.393613 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.393650 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqkj7\" (UniqueName: \"kubernetes.io/projected/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3-kube-api-access-kqkj7\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.452105 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-21bb-account-create-update-7plzn" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.462068 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-nvnbv" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.469575 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a73a-account-create-update-96g2p" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.479444 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-bf9cj" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.490225 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-fbfdb" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.600328 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k7dvp\" (UniqueName: \"kubernetes.io/projected/e7dabc19-8625-4a01-9ad5-f0370ec7d608-kube-api-access-k7dvp\") pod \"e7dabc19-8625-4a01-9ad5-f0370ec7d608\" (UID: \"e7dabc19-8625-4a01-9ad5-f0370ec7d608\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.600432 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/060d8a13-8904-4f0b-967b-108554dec9e7-operator-scripts\") pod \"060d8a13-8904-4f0b-967b-108554dec9e7\" (UID: \"060d8a13-8904-4f0b-967b-108554dec9e7\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.600474 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e02073f-e958-48a0-8bf6-8e9959924424-operator-scripts\") pod \"8e02073f-e958-48a0-8bf6-8e9959924424\" (UID: \"8e02073f-e958-48a0-8bf6-8e9959924424\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.600516 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tz45l\" (UniqueName: \"kubernetes.io/projected/8e02073f-e958-48a0-8bf6-8e9959924424-kube-api-access-tz45l\") pod \"8e02073f-e958-48a0-8bf6-8e9959924424\" (UID: \"8e02073f-e958-48a0-8bf6-8e9959924424\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.600543 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9m855\" (UniqueName: \"kubernetes.io/projected/060d8a13-8904-4f0b-967b-108554dec9e7-kube-api-access-9m855\") pod \"060d8a13-8904-4f0b-967b-108554dec9e7\" (UID: \"060d8a13-8904-4f0b-967b-108554dec9e7\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.600575 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wchq7\" (UniqueName: \"kubernetes.io/projected/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-kube-api-access-wchq7\") pod \"d01ae6d0-c74e-49fc-9681-0d6738b0f92b\" (UID: \"d01ae6d0-c74e-49fc-9681-0d6738b0f92b\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.600597 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvllq\" (UniqueName: \"kubernetes.io/projected/afe4e40b-44ed-477b-a1d8-b9b973042e11-kube-api-access-jvllq\") pod \"afe4e40b-44ed-477b-a1d8-b9b973042e11\" (UID: \"afe4e40b-44ed-477b-a1d8-b9b973042e11\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.600683 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-operator-scripts\") pod \"d01ae6d0-c74e-49fc-9681-0d6738b0f92b\" (UID: \"d01ae6d0-c74e-49fc-9681-0d6738b0f92b\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.600722 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7dabc19-8625-4a01-9ad5-f0370ec7d608-operator-scripts\") pod \"e7dabc19-8625-4a01-9ad5-f0370ec7d608\" (UID: \"e7dabc19-8625-4a01-9ad5-f0370ec7d608\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.600755 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/afe4e40b-44ed-477b-a1d8-b9b973042e11-operator-scripts\") pod \"afe4e40b-44ed-477b-a1d8-b9b973042e11\" (UID: \"afe4e40b-44ed-477b-a1d8-b9b973042e11\") " Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.601863 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/afe4e40b-44ed-477b-a1d8-b9b973042e11-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "afe4e40b-44ed-477b-a1d8-b9b973042e11" (UID: "afe4e40b-44ed-477b-a1d8-b9b973042e11"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.601996 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/060d8a13-8904-4f0b-967b-108554dec9e7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "060d8a13-8904-4f0b-967b-108554dec9e7" (UID: "060d8a13-8904-4f0b-967b-108554dec9e7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.602373 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7dabc19-8625-4a01-9ad5-f0370ec7d608-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e7dabc19-8625-4a01-9ad5-f0370ec7d608" (UID: "e7dabc19-8625-4a01-9ad5-f0370ec7d608"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.602471 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d01ae6d0-c74e-49fc-9681-0d6738b0f92b" (UID: "d01ae6d0-c74e-49fc-9681-0d6738b0f92b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.602488 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8e02073f-e958-48a0-8bf6-8e9959924424-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8e02073f-e958-48a0-8bf6-8e9959924424" (UID: "8e02073f-e958-48a0-8bf6-8e9959924424"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.605047 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7dabc19-8625-4a01-9ad5-f0370ec7d608-kube-api-access-k7dvp" (OuterVolumeSpecName: "kube-api-access-k7dvp") pod "e7dabc19-8625-4a01-9ad5-f0370ec7d608" (UID: "e7dabc19-8625-4a01-9ad5-f0370ec7d608"). InnerVolumeSpecName "kube-api-access-k7dvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.605085 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-kube-api-access-wchq7" (OuterVolumeSpecName: "kube-api-access-wchq7") pod "d01ae6d0-c74e-49fc-9681-0d6738b0f92b" (UID: "d01ae6d0-c74e-49fc-9681-0d6738b0f92b"). InnerVolumeSpecName "kube-api-access-wchq7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.606014 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e02073f-e958-48a0-8bf6-8e9959924424-kube-api-access-tz45l" (OuterVolumeSpecName: "kube-api-access-tz45l") pod "8e02073f-e958-48a0-8bf6-8e9959924424" (UID: "8e02073f-e958-48a0-8bf6-8e9959924424"). InnerVolumeSpecName "kube-api-access-tz45l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.606927 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/afe4e40b-44ed-477b-a1d8-b9b973042e11-kube-api-access-jvllq" (OuterVolumeSpecName: "kube-api-access-jvllq") pod "afe4e40b-44ed-477b-a1d8-b9b973042e11" (UID: "afe4e40b-44ed-477b-a1d8-b9b973042e11"). InnerVolumeSpecName "kube-api-access-jvllq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.608088 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/060d8a13-8904-4f0b-967b-108554dec9e7-kube-api-access-9m855" (OuterVolumeSpecName: "kube-api-access-9m855") pod "060d8a13-8904-4f0b-967b-108554dec9e7" (UID: "060d8a13-8904-4f0b-967b-108554dec9e7"). InnerVolumeSpecName "kube-api-access-9m855". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.702925 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8e02073f-e958-48a0-8bf6-8e9959924424-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.703411 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tz45l\" (UniqueName: \"kubernetes.io/projected/8e02073f-e958-48a0-8bf6-8e9959924424-kube-api-access-tz45l\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.703487 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9m855\" (UniqueName: \"kubernetes.io/projected/060d8a13-8904-4f0b-967b-108554dec9e7-kube-api-access-9m855\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.703545 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wchq7\" (UniqueName: \"kubernetes.io/projected/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-kube-api-access-wchq7\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.703607 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvllq\" (UniqueName: \"kubernetes.io/projected/afe4e40b-44ed-477b-a1d8-b9b973042e11-kube-api-access-jvllq\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.703669 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d01ae6d0-c74e-49fc-9681-0d6738b0f92b-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.703730 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e7dabc19-8625-4a01-9ad5-f0370ec7d608-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.703789 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/afe4e40b-44ed-477b-a1d8-b9b973042e11-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.703850 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k7dvp\" (UniqueName: \"kubernetes.io/projected/e7dabc19-8625-4a01-9ad5-f0370ec7d608-kube-api-access-k7dvp\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.703937 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/060d8a13-8904-4f0b-967b-108554dec9e7-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.751825 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-21bb-account-create-update-7plzn" event={"ID":"d01ae6d0-c74e-49fc-9681-0d6738b0f92b","Type":"ContainerDied","Data":"3af467cd29385985bab40d06c068971e6ba70e5e02aebad6b809202acdfe46c6"} Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.752700 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3af467cd29385985bab40d06c068971e6ba70e5e02aebad6b809202acdfe46c6" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.751872 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-21bb-account-create-update-7plzn" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.753339 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-a73a-account-create-update-96g2p" event={"ID":"e7dabc19-8625-4a01-9ad5-f0370ec7d608","Type":"ContainerDied","Data":"e78244e50849885faea503d8a96db58811271ec17b14e319ef0d17d929056e40"} Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.753382 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e78244e50849885faea503d8a96db58811271ec17b14e319ef0d17d929056e40" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.753438 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-a73a-account-create-update-96g2p" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.774776 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-89b5-account-create-update-h744x" event={"ID":"6e32216a-8f9f-440a-9c2a-bba04eb3f0e3","Type":"ContainerDied","Data":"0d8387d835bc5b5b0266186cfa70b8b901d6575f2be20afafc82c36aec18ad6f"} Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.774838 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d8387d835bc5b5b0266186cfa70b8b901d6575f2be20afafc82c36aec18ad6f" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.774882 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-89b5-account-create-update-h744x" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.777103 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-nvnbv" event={"ID":"afe4e40b-44ed-477b-a1d8-b9b973042e11","Type":"ContainerDied","Data":"10d8cc78a2ab96759eb342f382b813b7797d62b4d2e218b04c42ef72d6047fe7"} Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.777138 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="10d8cc78a2ab96759eb342f382b813b7797d62b4d2e218b04c42ef72d6047fe7" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.777197 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-nvnbv" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.778743 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-bf9cj" event={"ID":"060d8a13-8904-4f0b-967b-108554dec9e7","Type":"ContainerDied","Data":"86bdf28e2fa227611c7c7e11f64a313049a7fc1a478f1102385558f88ac9a716"} Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.778775 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86bdf28e2fa227611c7c7e11f64a313049a7fc1a478f1102385558f88ac9a716" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.778831 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-bf9cj" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.780409 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-fbfdb" event={"ID":"8e02073f-e958-48a0-8bf6-8e9959924424","Type":"ContainerDied","Data":"bbed4c175d25c5a4d8512c55139cc025ea9d00cca191b3d5c2b14dbd76407f9f"} Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.780494 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bbed4c175d25c5a4d8512c55139cc025ea9d00cca191b3d5c2b14dbd76407f9f" Nov 25 21:46:29 crc kubenswrapper[4910]: I1125 21:46:29.780590 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-fbfdb" Nov 25 21:46:33 crc kubenswrapper[4910]: I1125 21:46:33.826493 4910 generic.go:334] "Generic (PLEG): container finished" podID="5050ee25-88de-4888-ba01-fc11c71df0a1" containerID="71d7685befad390b42ac10bc58b1fd4d0b5c4f9af15b82c29162a5b89a0a3673" exitCode=0 Nov 25 21:46:33 crc kubenswrapper[4910]: I1125 21:46:33.826605 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v7clx" event={"ID":"5050ee25-88de-4888-ba01-fc11c71df0a1","Type":"ContainerDied","Data":"71d7685befad390b42ac10bc58b1fd4d0b5c4f9af15b82c29162a5b89a0a3673"} Nov 25 21:46:33 crc kubenswrapper[4910]: I1125 21:46:33.880995 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:33 crc kubenswrapper[4910]: I1125 21:46:33.890908 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/df4c228a-b3ae-4de6-bd0b-a761692c4476-etc-swift\") pod \"swift-storage-0\" (UID: \"df4c228a-b3ae-4de6-bd0b-a761692c4476\") " pod="openstack/swift-storage-0" Nov 25 21:46:34 crc kubenswrapper[4910]: I1125 21:46:34.148573 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 25 21:46:34 crc kubenswrapper[4910]: I1125 21:46:34.710210 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 25 21:46:34 crc kubenswrapper[4910]: I1125 21:46:34.834554 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"9d16c56163d8e732728e893b2f66b0d61b1ca1e255543843aa465e5b18f90c59"} Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.165267 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.334346 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-swiftconf\") pod \"5050ee25-88de-4888-ba01-fc11c71df0a1\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.334420 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-dispersionconf\") pod \"5050ee25-88de-4888-ba01-fc11c71df0a1\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.334481 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-scripts\") pod \"5050ee25-88de-4888-ba01-fc11c71df0a1\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.334563 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-ring-data-devices\") pod \"5050ee25-88de-4888-ba01-fc11c71df0a1\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.334640 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-combined-ca-bundle\") pod \"5050ee25-88de-4888-ba01-fc11c71df0a1\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.334695 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5050ee25-88de-4888-ba01-fc11c71df0a1-etc-swift\") pod \"5050ee25-88de-4888-ba01-fc11c71df0a1\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.334811 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7q4s\" (UniqueName: \"kubernetes.io/projected/5050ee25-88de-4888-ba01-fc11c71df0a1-kube-api-access-w7q4s\") pod \"5050ee25-88de-4888-ba01-fc11c71df0a1\" (UID: \"5050ee25-88de-4888-ba01-fc11c71df0a1\") " Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.336114 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "5050ee25-88de-4888-ba01-fc11c71df0a1" (UID: "5050ee25-88de-4888-ba01-fc11c71df0a1"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.337129 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5050ee25-88de-4888-ba01-fc11c71df0a1-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "5050ee25-88de-4888-ba01-fc11c71df0a1" (UID: "5050ee25-88de-4888-ba01-fc11c71df0a1"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.342911 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5050ee25-88de-4888-ba01-fc11c71df0a1-kube-api-access-w7q4s" (OuterVolumeSpecName: "kube-api-access-w7q4s") pod "5050ee25-88de-4888-ba01-fc11c71df0a1" (UID: "5050ee25-88de-4888-ba01-fc11c71df0a1"). InnerVolumeSpecName "kube-api-access-w7q4s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.347428 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-r42pw"] Nov 25 21:46:35 crc kubenswrapper[4910]: E1125 21:46:35.348032 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7dabc19-8625-4a01-9ad5-f0370ec7d608" containerName="mariadb-account-create-update" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348049 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7dabc19-8625-4a01-9ad5-f0370ec7d608" containerName="mariadb-account-create-update" Nov 25 21:46:35 crc kubenswrapper[4910]: E1125 21:46:35.348071 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e32216a-8f9f-440a-9c2a-bba04eb3f0e3" containerName="mariadb-account-create-update" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348077 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e32216a-8f9f-440a-9c2a-bba04eb3f0e3" containerName="mariadb-account-create-update" Nov 25 21:46:35 crc kubenswrapper[4910]: E1125 21:46:35.348088 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="afe4e40b-44ed-477b-a1d8-b9b973042e11" containerName="mariadb-database-create" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348094 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="afe4e40b-44ed-477b-a1d8-b9b973042e11" containerName="mariadb-database-create" Nov 25 21:46:35 crc kubenswrapper[4910]: E1125 21:46:35.348114 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d01ae6d0-c74e-49fc-9681-0d6738b0f92b" containerName="mariadb-account-create-update" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348119 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d01ae6d0-c74e-49fc-9681-0d6738b0f92b" containerName="mariadb-account-create-update" Nov 25 21:46:35 crc kubenswrapper[4910]: E1125 21:46:35.348137 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e02073f-e958-48a0-8bf6-8e9959924424" containerName="mariadb-database-create" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348143 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e02073f-e958-48a0-8bf6-8e9959924424" containerName="mariadb-database-create" Nov 25 21:46:35 crc kubenswrapper[4910]: E1125 21:46:35.348155 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="060d8a13-8904-4f0b-967b-108554dec9e7" containerName="mariadb-database-create" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348162 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="060d8a13-8904-4f0b-967b-108554dec9e7" containerName="mariadb-database-create" Nov 25 21:46:35 crc kubenswrapper[4910]: E1125 21:46:35.348174 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07068c0b-39ba-423b-b6f6-5f68f568a6fe" containerName="dnsmasq-dns" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348180 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="07068c0b-39ba-423b-b6f6-5f68f568a6fe" 
containerName="dnsmasq-dns" Nov 25 21:46:35 crc kubenswrapper[4910]: E1125 21:46:35.348191 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5050ee25-88de-4888-ba01-fc11c71df0a1" containerName="swift-ring-rebalance" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348197 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5050ee25-88de-4888-ba01-fc11c71df0a1" containerName="swift-ring-rebalance" Nov 25 21:46:35 crc kubenswrapper[4910]: E1125 21:46:35.348207 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07068c0b-39ba-423b-b6f6-5f68f568a6fe" containerName="init" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348213 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="07068c0b-39ba-423b-b6f6-5f68f568a6fe" containerName="init" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348384 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e32216a-8f9f-440a-9c2a-bba04eb3f0e3" containerName="mariadb-account-create-update" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348394 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d01ae6d0-c74e-49fc-9681-0d6738b0f92b" containerName="mariadb-account-create-update" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348405 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="060d8a13-8904-4f0b-967b-108554dec9e7" containerName="mariadb-database-create" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348412 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e02073f-e958-48a0-8bf6-8e9959924424" containerName="mariadb-database-create" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348421 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="07068c0b-39ba-423b-b6f6-5f68f568a6fe" containerName="dnsmasq-dns" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348429 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5050ee25-88de-4888-ba01-fc11c71df0a1" containerName="swift-ring-rebalance" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348441 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="afe4e40b-44ed-477b-a1d8-b9b973042e11" containerName="mariadb-database-create" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.348449 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7dabc19-8625-4a01-9ad5-f0370ec7d608" containerName="mariadb-account-create-update" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.352546 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "5050ee25-88de-4888-ba01-fc11c71df0a1" (UID: "5050ee25-88de-4888-ba01-fc11c71df0a1"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.353449 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.356356 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.356449 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-8nw9f" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.359676 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-r42pw"] Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.371690 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-scripts" (OuterVolumeSpecName: "scripts") pod "5050ee25-88de-4888-ba01-fc11c71df0a1" (UID: "5050ee25-88de-4888-ba01-fc11c71df0a1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.380896 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5050ee25-88de-4888-ba01-fc11c71df0a1" (UID: "5050ee25-88de-4888-ba01-fc11c71df0a1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.393979 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "5050ee25-88de-4888-ba01-fc11c71df0a1" (UID: "5050ee25-88de-4888-ba01-fc11c71df0a1"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.436692 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7q4s\" (UniqueName: \"kubernetes.io/projected/5050ee25-88de-4888-ba01-fc11c71df0a1-kube-api-access-w7q4s\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.436733 4910 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.436746 4910 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.436755 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.436765 4910 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/5050ee25-88de-4888-ba01-fc11c71df0a1-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.436773 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5050ee25-88de-4888-ba01-fc11c71df0a1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.436782 4910 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/5050ee25-88de-4888-ba01-fc11c71df0a1-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.523957 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-dbkwd" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.524397 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-dbkwd" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.538468 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-config-data\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.538528 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-combined-ca-bundle\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.538566 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-db-sync-config-data\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.538611 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gdnd5\" (UniqueName: \"kubernetes.io/projected/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-kube-api-access-gdnd5\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.545258 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-554dc" podUID="5d3afe23-a5d2-4f9c-bdaa-f80020ef6226" containerName="ovn-controller" probeResult="failure" output=< Nov 25 21:46:35 crc kubenswrapper[4910]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 25 21:46:35 crc kubenswrapper[4910]: > Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.640717 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gdnd5\" (UniqueName: \"kubernetes.io/projected/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-kube-api-access-gdnd5\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.640952 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-config-data\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.642110 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-combined-ca-bundle\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.642156 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-db-sync-config-data\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.646928 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-combined-ca-bundle\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.647388 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-db-sync-config-data\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.648130 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-config-data\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.657600 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gdnd5\" (UniqueName: 
\"kubernetes.io/projected/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-kube-api-access-gdnd5\") pod \"glance-db-sync-r42pw\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.753853 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-554dc-config-tgkb2"] Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.755014 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.756765 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.763656 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-r42pw" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.769084 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-554dc-config-tgkb2"] Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.867657 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-v7clx" event={"ID":"5050ee25-88de-4888-ba01-fc11c71df0a1","Type":"ContainerDied","Data":"b5ffa97a78f1abc3e0e6c84ce2c611f6d50cdd9a2d0cec587abd9da12b0bbcfd"} Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.867702 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b5ffa97a78f1abc3e0e6c84ce2c611f6d50cdd9a2d0cec587abd9da12b0bbcfd" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.867779 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-v7clx" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.953530 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-log-ovn\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.953592 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.953621 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-additional-scripts\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.953642 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4vnf\" (UniqueName: \"kubernetes.io/projected/099380d7-ac21-4e05-a5ab-7f05185d4348-kube-api-access-l4vnf\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.953667 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-scripts\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:35 crc kubenswrapper[4910]: I1125 21:46:35.953692 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run-ovn\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.054600 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-log-ovn\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.054663 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.054688 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-additional-scripts\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.054709 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l4vnf\" (UniqueName: \"kubernetes.io/projected/099380d7-ac21-4e05-a5ab-7f05185d4348-kube-api-access-l4vnf\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.054738 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-scripts\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.054765 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run-ovn\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.054960 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-log-ovn\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: 
I1125 21:46:36.055022 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run-ovn\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.055144 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.055521 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-additional-scripts\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.058557 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-scripts\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.070896 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4vnf\" (UniqueName: \"kubernetes.io/projected/099380d7-ac21-4e05-a5ab-7f05185d4348-kube-api-access-l4vnf\") pod \"ovn-controller-554dc-config-tgkb2\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.075803 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.583295 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-554dc-config-tgkb2"] Nov 25 21:46:36 crc kubenswrapper[4910]: W1125 21:46:36.598106 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod099380d7_ac21_4e05_a5ab_7f05185d4348.slice/crio-ec92afdde0509d4f9bf2707d36e3941378ac632c117ca258f565415012992b9f WatchSource:0}: Error finding container ec92afdde0509d4f9bf2707d36e3941378ac632c117ca258f565415012992b9f: Status 404 returned error can't find the container with id ec92afdde0509d4f9bf2707d36e3941378ac632c117ca258f565415012992b9f Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.891928 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"9a0ee210cd19f471b495b4e72bc86da382affaec04fe883429341cfc0118b416"} Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.892330 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"e903c53d9ec3396ee1979f3239afe1e6833e889ef6214efb6c746dcfc227ded4"} Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.894964 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-554dc-config-tgkb2" event={"ID":"099380d7-ac21-4e05-a5ab-7f05185d4348","Type":"ContainerStarted","Data":"ec92afdde0509d4f9bf2707d36e3941378ac632c117ca258f565415012992b9f"} Nov 25 21:46:36 crc kubenswrapper[4910]: I1125 21:46:36.912911 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-r42pw"] Nov 25 21:46:36 crc kubenswrapper[4910]: W1125 21:46:36.917570 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6ee1badb_9c33_4b86_b2c2_cd96e6c30bc5.slice/crio-b1e22720212ffdbaf795c42eb631e0ef4efbd42af9c0eb4f8a27dbd1b945f16a WatchSource:0}: Error finding container b1e22720212ffdbaf795c42eb631e0ef4efbd42af9c0eb4f8a27dbd1b945f16a: Status 404 returned error can't find the container with id b1e22720212ffdbaf795c42eb631e0ef4efbd42af9c0eb4f8a27dbd1b945f16a Nov 25 21:46:37 crc kubenswrapper[4910]: I1125 21:46:37.909229 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"f6484a2ff66af7574a4670ecaffa7f1d9f1064402473aa1ce34aba0b606b2a66"} Nov 25 21:46:37 crc kubenswrapper[4910]: I1125 21:46:37.909558 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"8e5d7140939780ec8ce036696c24058da6d3829fee6bbd5e6dbe6ccb8cf7a2eb"} Nov 25 21:46:37 crc kubenswrapper[4910]: I1125 21:46:37.911711 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-r42pw" event={"ID":"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5","Type":"ContainerStarted","Data":"b1e22720212ffdbaf795c42eb631e0ef4efbd42af9c0eb4f8a27dbd1b945f16a"} Nov 25 21:46:37 crc kubenswrapper[4910]: I1125 21:46:37.914110 4910 generic.go:334] "Generic (PLEG): container finished" podID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" 
containerID="a663883b01e5d94c3a7a51f2d4075ac478de107d5f88233f99ea3c93dfe1bda9" exitCode=0 Nov 25 21:46:37 crc kubenswrapper[4910]: I1125 21:46:37.914165 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9b20e3e8-ac28-471d-82ed-e619a78a7c55","Type":"ContainerDied","Data":"a663883b01e5d94c3a7a51f2d4075ac478de107d5f88233f99ea3c93dfe1bda9"} Nov 25 21:46:37 crc kubenswrapper[4910]: I1125 21:46:37.918749 4910 generic.go:334] "Generic (PLEG): container finished" podID="099380d7-ac21-4e05-a5ab-7f05185d4348" containerID="36f5cd42b631fd69c9e5846f353e8f21ae91761d6f2cb3d74208d623a99873e2" exitCode=0 Nov 25 21:46:37 crc kubenswrapper[4910]: I1125 21:46:37.918822 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-554dc-config-tgkb2" event={"ID":"099380d7-ac21-4e05-a5ab-7f05185d4348","Type":"ContainerDied","Data":"36f5cd42b631fd69c9e5846f353e8f21ae91761d6f2cb3d74208d623a99873e2"} Nov 25 21:46:38 crc kubenswrapper[4910]: I1125 21:46:38.943398 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9b20e3e8-ac28-471d-82ed-e619a78a7c55","Type":"ContainerStarted","Data":"b825c8d5cffc9bffbd7bed750a3dc1621251f7c189ded2dc33ebc862bd5e61e2"} Nov 25 21:46:38 crc kubenswrapper[4910]: I1125 21:46:38.944686 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:46:38 crc kubenswrapper[4910]: I1125 21:46:38.954105 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"197325abaf210fd603928b90924f89b2d05cc98f45608b869bc69ea8ef6966b4"} Nov 25 21:46:38 crc kubenswrapper[4910]: I1125 21:46:38.954161 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"4dc1ed929e54b38a8f95382ae46581abede569347098b1479266a0c9673a1e49"} Nov 25 21:46:38 crc kubenswrapper[4910]: I1125 21:46:38.954177 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"fbf4bea2ad362a5764797f443b12ca576a98ea7a35994f2664372304cb614c84"} Nov 25 21:46:38 crc kubenswrapper[4910]: I1125 21:46:38.984604 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=37.654980433 podStartE2EDuration="1m8.984572295s" podCreationTimestamp="2025-11-25 21:45:30 +0000 UTC" firstStartedPulling="2025-11-25 21:45:32.488928079 +0000 UTC m=+887.951404401" lastFinishedPulling="2025-11-25 21:46:03.818519941 +0000 UTC m=+919.280996263" observedRunningTime="2025-11-25 21:46:38.972446123 +0000 UTC m=+954.434922465" watchObservedRunningTime="2025-11-25 21:46:38.984572295 +0000 UTC m=+954.447048617" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.317059 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.416872 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run\") pod \"099380d7-ac21-4e05-a5ab-7f05185d4348\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.416944 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-additional-scripts\") pod \"099380d7-ac21-4e05-a5ab-7f05185d4348\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.416988 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run-ovn\") pod \"099380d7-ac21-4e05-a5ab-7f05185d4348\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.417023 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-log-ovn\") pod \"099380d7-ac21-4e05-a5ab-7f05185d4348\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.417142 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-scripts\") pod \"099380d7-ac21-4e05-a5ab-7f05185d4348\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.417215 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4vnf\" (UniqueName: \"kubernetes.io/projected/099380d7-ac21-4e05-a5ab-7f05185d4348-kube-api-access-l4vnf\") pod \"099380d7-ac21-4e05-a5ab-7f05185d4348\" (UID: \"099380d7-ac21-4e05-a5ab-7f05185d4348\") " Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.417524 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "099380d7-ac21-4e05-a5ab-7f05185d4348" (UID: "099380d7-ac21-4e05-a5ab-7f05185d4348"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.418094 4910 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.418128 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run" (OuterVolumeSpecName: "var-run") pod "099380d7-ac21-4e05-a5ab-7f05185d4348" (UID: "099380d7-ac21-4e05-a5ab-7f05185d4348"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.418978 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "099380d7-ac21-4e05-a5ab-7f05185d4348" (UID: "099380d7-ac21-4e05-a5ab-7f05185d4348"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.419013 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "099380d7-ac21-4e05-a5ab-7f05185d4348" (UID: "099380d7-ac21-4e05-a5ab-7f05185d4348"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.420042 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-scripts" (OuterVolumeSpecName: "scripts") pod "099380d7-ac21-4e05-a5ab-7f05185d4348" (UID: "099380d7-ac21-4e05-a5ab-7f05185d4348"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.424130 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/099380d7-ac21-4e05-a5ab-7f05185d4348-kube-api-access-l4vnf" (OuterVolumeSpecName: "kube-api-access-l4vnf") pod "099380d7-ac21-4e05-a5ab-7f05185d4348" (UID: "099380d7-ac21-4e05-a5ab-7f05185d4348"). InnerVolumeSpecName "kube-api-access-l4vnf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.519965 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4vnf\" (UniqueName: \"kubernetes.io/projected/099380d7-ac21-4e05-a5ab-7f05185d4348-kube-api-access-l4vnf\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.520014 4910 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-run\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.520029 4910 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.520041 4910 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/099380d7-ac21-4e05-a5ab-7f05185d4348-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.520053 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/099380d7-ac21-4e05-a5ab-7f05185d4348-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.968143 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-554dc-config-tgkb2" event={"ID":"099380d7-ac21-4e05-a5ab-7f05185d4348","Type":"ContainerDied","Data":"ec92afdde0509d4f9bf2707d36e3941378ac632c117ca258f565415012992b9f"} Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.970003 4910 
pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ec92afdde0509d4f9bf2707d36e3941378ac632c117ca258f565415012992b9f" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.968197 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-554dc-config-tgkb2" Nov 25 21:46:39 crc kubenswrapper[4910]: I1125 21:46:39.975684 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"64a3ef808516502b2b0a0f9751dcfb75b8204c9bef9babb2b4674cdb2cbb6c53"} Nov 25 21:46:40 crc kubenswrapper[4910]: I1125 21:46:40.428021 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-554dc-config-tgkb2"] Nov 25 21:46:40 crc kubenswrapper[4910]: I1125 21:46:40.443055 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-554dc-config-tgkb2"] Nov 25 21:46:40 crc kubenswrapper[4910]: I1125 21:46:40.569995 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-554dc" Nov 25 21:46:41 crc kubenswrapper[4910]: I1125 21:46:41.029893 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"40254ee2f845b9c8af6c599b34405ec92ca5372545c432e4ba0e3ceca9c84591"} Nov 25 21:46:41 crc kubenswrapper[4910]: I1125 21:46:41.213590 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="099380d7-ac21-4e05-a5ab-7f05185d4348" path="/var/lib/kubelet/pods/099380d7-ac21-4e05-a5ab-7f05185d4348/volumes" Nov 25 21:46:42 crc kubenswrapper[4910]: I1125 21:46:42.045064 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"e2cbb2ea1b3cc97191636ef79e9b57cd67f0dec983ff96e6f54aeebb4ad0ec1e"} Nov 25 21:46:42 crc kubenswrapper[4910]: I1125 21:46:42.045462 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"1e047d30e80d4b1acf4ece8b8fb8cfd90bcc66d96962489baef669bc17e297b9"} Nov 25 21:46:42 crc kubenswrapper[4910]: I1125 21:46:42.045479 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"1ef96e79b9c8e29862344b91477a989f4fd31e33ad4712251b84da3aef26485f"} Nov 25 21:46:42 crc kubenswrapper[4910]: I1125 21:46:42.045491 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"4c5627e30bbe4afe8cd2c1a9f71e8ded7e890691fd924f6d3123fa9a60cdb3f1"} Nov 25 21:46:42 crc kubenswrapper[4910]: I1125 21:46:42.045503 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"8c961646798481023e5477dad99e76c3cd7daf2933647a3e9e667682f5c4bebb"} Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.065639 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"df4c228a-b3ae-4de6-bd0b-a761692c4476","Type":"ContainerStarted","Data":"ceea1390157cdff4b6ff2f5e88ced46077fbd5074aceece944afc8c4163ed272"} Nov 25 21:46:43 crc 
kubenswrapper[4910]: I1125 21:46:43.067524 4910 generic.go:334] "Generic (PLEG): container finished" podID="58eca84e-dfac-4af7-ad45-241a776f81d6" containerID="3a5caa60ba704cb56a0ba9be7c1f393181c0267978e6e6d9500155cc7e70b7c2" exitCode=0 Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.067562 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58eca84e-dfac-4af7-ad45-241a776f81d6","Type":"ContainerDied","Data":"3a5caa60ba704cb56a0ba9be7c1f393181c0267978e6e6d9500155cc7e70b7c2"} Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.113763 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=21.202959547 podStartE2EDuration="27.113738664s" podCreationTimestamp="2025-11-25 21:46:16 +0000 UTC" firstStartedPulling="2025-11-25 21:46:34.718926624 +0000 UTC m=+950.181402946" lastFinishedPulling="2025-11-25 21:46:40.629705741 +0000 UTC m=+956.092182063" observedRunningTime="2025-11-25 21:46:43.109524382 +0000 UTC m=+958.572000764" watchObservedRunningTime="2025-11-25 21:46:43.113738664 +0000 UTC m=+958.576215006" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.385870 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-8hktq"] Nov 25 21:46:43 crc kubenswrapper[4910]: E1125 21:46:43.386659 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="099380d7-ac21-4e05-a5ab-7f05185d4348" containerName="ovn-config" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.386676 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="099380d7-ac21-4e05-a5ab-7f05185d4348" containerName="ovn-config" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.386882 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="099380d7-ac21-4e05-a5ab-7f05185d4348" containerName="ovn-config" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.387900 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.396335 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.406491 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-8hktq"] Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.497113 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.497187 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.497266 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-config\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.497453 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.497530 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t8pmg\" (UniqueName: \"kubernetes.io/projected/d9af0ae9-a945-4d74-8472-702a9bef15b0-kube-api-access-t8pmg\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.497624 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.600314 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.600398 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: 
\"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.600439 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-config\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.600466 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.600498 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t8pmg\" (UniqueName: \"kubernetes.io/projected/d9af0ae9-a945-4d74-8472-702a9bef15b0-kube-api-access-t8pmg\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.600539 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.601750 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-svc\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.602384 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-swift-storage-0\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.602967 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-sb\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.603619 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-config\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.604340 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-nb\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: 
I1125 21:46:43.627776 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t8pmg\" (UniqueName: \"kubernetes.io/projected/d9af0ae9-a945-4d74-8472-702a9bef15b0-kube-api-access-t8pmg\") pod \"dnsmasq-dns-6d5b6d6b67-8hktq\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:43 crc kubenswrapper[4910]: I1125 21:46:43.720943 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:49 crc kubenswrapper[4910]: I1125 21:46:49.426203 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-8hktq"] Nov 25 21:46:49 crc kubenswrapper[4910]: W1125 21:46:49.429173 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9af0ae9_a945_4d74_8472_702a9bef15b0.slice/crio-1c3724778be98a858449e7aef6a0a5748e8932fe08bbe5b5f917862fa59f2245 WatchSource:0}: Error finding container 1c3724778be98a858449e7aef6a0a5748e8932fe08bbe5b5f917862fa59f2245: Status 404 returned error can't find the container with id 1c3724778be98a858449e7aef6a0a5748e8932fe08bbe5b5f917862fa59f2245 Nov 25 21:46:50 crc kubenswrapper[4910]: I1125 21:46:50.153168 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58eca84e-dfac-4af7-ad45-241a776f81d6","Type":"ContainerStarted","Data":"a878a11275d059b3812b533330998964e76b42f5acaf365e2ee090734c1fa3f4"} Nov 25 21:46:50 crc kubenswrapper[4910]: I1125 21:46:50.155119 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 21:46:50 crc kubenswrapper[4910]: I1125 21:46:50.157789 4910 generic.go:334] "Generic (PLEG): container finished" podID="d9af0ae9-a945-4d74-8472-702a9bef15b0" containerID="dd70cc8fd201df03477356224bd1566c8f851a8633e4d3f0b1adf5989a88f048" exitCode=0 Nov 25 21:46:50 crc kubenswrapper[4910]: I1125 21:46:50.157876 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" event={"ID":"d9af0ae9-a945-4d74-8472-702a9bef15b0","Type":"ContainerDied","Data":"dd70cc8fd201df03477356224bd1566c8f851a8633e4d3f0b1adf5989a88f048"} Nov 25 21:46:50 crc kubenswrapper[4910]: I1125 21:46:50.157910 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" event={"ID":"d9af0ae9-a945-4d74-8472-702a9bef15b0","Type":"ContainerStarted","Data":"1c3724778be98a858449e7aef6a0a5748e8932fe08bbe5b5f917862fa59f2245"} Nov 25 21:46:50 crc kubenswrapper[4910]: I1125 21:46:50.161599 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-r42pw" event={"ID":"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5","Type":"ContainerStarted","Data":"e2d911f256192b349ab826bb2617841035c65fd6fc2dd122b5d10cdb5ea5ef73"} Nov 25 21:46:50 crc kubenswrapper[4910]: I1125 21:46:50.198215 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=-9223371956.656584 podStartE2EDuration="1m20.198192778s" podCreationTimestamp="2025-11-25 21:45:30 +0000 UTC" firstStartedPulling="2025-11-25 21:45:32.20552057 +0000 UTC m=+887.667996892" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:46:50.191634754 +0000 UTC m=+965.654111086" watchObservedRunningTime="2025-11-25 21:46:50.198192778 +0000 UTC m=+965.660669100" Nov 25 21:46:50 crc kubenswrapper[4910]: I1125 
21:46:50.227101 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-r42pw" podStartSLOduration=3.088168093 podStartE2EDuration="15.227077295s" podCreationTimestamp="2025-11-25 21:46:35 +0000 UTC" firstStartedPulling="2025-11-25 21:46:36.920810242 +0000 UTC m=+952.383286564" lastFinishedPulling="2025-11-25 21:46:49.059719444 +0000 UTC m=+964.522195766" observedRunningTime="2025-11-25 21:46:50.221650751 +0000 UTC m=+965.684127113" watchObservedRunningTime="2025-11-25 21:46:50.227077295 +0000 UTC m=+965.689553637" Nov 25 21:46:51 crc kubenswrapper[4910]: I1125 21:46:51.180989 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" event={"ID":"d9af0ae9-a945-4d74-8472-702a9bef15b0","Type":"ContainerStarted","Data":"57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25"} Nov 25 21:46:51 crc kubenswrapper[4910]: I1125 21:46:51.181481 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:51 crc kubenswrapper[4910]: I1125 21:46:51.202701 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" podStartSLOduration=8.202681218 podStartE2EDuration="8.202681218s" podCreationTimestamp="2025-11-25 21:46:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:46:51.199590416 +0000 UTC m=+966.662066778" watchObservedRunningTime="2025-11-25 21:46:51.202681218 +0000 UTC m=+966.665157540" Nov 25 21:46:51 crc kubenswrapper[4910]: I1125 21:46:51.895106 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:46:58 crc kubenswrapper[4910]: I1125 21:46:58.723452 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:46:58 crc kubenswrapper[4910]: I1125 21:46:58.850740 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-h8756"] Nov 25 21:46:58 crc kubenswrapper[4910]: I1125 21:46:58.851291 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" podUID="de327dc7-6e50-4eb8-bc62-bfe861d55d45" containerName="dnsmasq-dns" containerID="cri-o://895859d9d08b4c52a4826f7ebd3b17a15002856be9890110f021a91ed79c95d9" gracePeriod=10 Nov 25 21:46:59 crc kubenswrapper[4910]: E1125 21:46:59.001748 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podde327dc7_6e50_4eb8_bc62_bfe861d55d45.slice/crio-895859d9d08b4c52a4826f7ebd3b17a15002856be9890110f021a91ed79c95d9.scope\": RecentStats: unable to find data in memory cache]" Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.251532 4910 generic.go:334] "Generic (PLEG): container finished" podID="6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5" containerID="e2d911f256192b349ab826bb2617841035c65fd6fc2dd122b5d10cdb5ea5ef73" exitCode=0 Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.251606 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-r42pw" event={"ID":"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5","Type":"ContainerDied","Data":"e2d911f256192b349ab826bb2617841035c65fd6fc2dd122b5d10cdb5ea5ef73"} Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.254279 4910 
generic.go:334] "Generic (PLEG): container finished" podID="de327dc7-6e50-4eb8-bc62-bfe861d55d45" containerID="895859d9d08b4c52a4826f7ebd3b17a15002856be9890110f021a91ed79c95d9" exitCode=0 Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.254347 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" event={"ID":"de327dc7-6e50-4eb8-bc62-bfe861d55d45","Type":"ContainerDied","Data":"895859d9d08b4c52a4826f7ebd3b17a15002856be9890110f021a91ed79c95d9"} Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.362039 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.421874 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-dns-svc\") pod \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.421949 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-nb\") pod \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.421969 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-config\") pod \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.422004 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9w9q\" (UniqueName: \"kubernetes.io/projected/de327dc7-6e50-4eb8-bc62-bfe861d55d45-kube-api-access-h9w9q\") pod \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.422056 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-sb\") pod \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\" (UID: \"de327dc7-6e50-4eb8-bc62-bfe861d55d45\") " Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.431103 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de327dc7-6e50-4eb8-bc62-bfe861d55d45-kube-api-access-h9w9q" (OuterVolumeSpecName: "kube-api-access-h9w9q") pod "de327dc7-6e50-4eb8-bc62-bfe861d55d45" (UID: "de327dc7-6e50-4eb8-bc62-bfe861d55d45"). InnerVolumeSpecName "kube-api-access-h9w9q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.468552 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "de327dc7-6e50-4eb8-bc62-bfe861d55d45" (UID: "de327dc7-6e50-4eb8-bc62-bfe861d55d45"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.469052 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-config" (OuterVolumeSpecName: "config") pod "de327dc7-6e50-4eb8-bc62-bfe861d55d45" (UID: "de327dc7-6e50-4eb8-bc62-bfe861d55d45"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.470800 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "de327dc7-6e50-4eb8-bc62-bfe861d55d45" (UID: "de327dc7-6e50-4eb8-bc62-bfe861d55d45"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.486808 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "de327dc7-6e50-4eb8-bc62-bfe861d55d45" (UID: "de327dc7-6e50-4eb8-bc62-bfe861d55d45"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.524380 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.524425 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.524437 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.524450 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9w9q\" (UniqueName: \"kubernetes.io/projected/de327dc7-6e50-4eb8-bc62-bfe861d55d45-kube-api-access-h9w9q\") on node \"crc\" DevicePath \"\"" Nov 25 21:46:59 crc kubenswrapper[4910]: I1125 21:46:59.524459 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/de327dc7-6e50-4eb8-bc62-bfe861d55d45-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.265973 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" event={"ID":"de327dc7-6e50-4eb8-bc62-bfe861d55d45","Type":"ContainerDied","Data":"e6fe552b94badc5425cd43e2407d67a29b037d681b002fc536c55df8dad2c748"} Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.266045 4910 scope.go:117] "RemoveContainer" containerID="895859d9d08b4c52a4826f7ebd3b17a15002856be9890110f021a91ed79c95d9" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.265983 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-h8756" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.296702 4910 scope.go:117] "RemoveContainer" containerID="057825e17ca858c077568f1a0b04e932412a79ee6f8a5d8fce9e5c7fca4079f5" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.324464 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-h8756"] Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.331594 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-h8756"] Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.717304 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-r42pw" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.750865 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-config-data\") pod \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.750991 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gdnd5\" (UniqueName: \"kubernetes.io/projected/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-kube-api-access-gdnd5\") pod \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.751159 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-db-sync-config-data\") pod \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.751215 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-combined-ca-bundle\") pod \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\" (UID: \"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5\") " Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.757400 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5" (UID: "6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.762767 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-kube-api-access-gdnd5" (OuterVolumeSpecName: "kube-api-access-gdnd5") pod "6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5" (UID: "6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5"). InnerVolumeSpecName "kube-api-access-gdnd5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.779082 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5" (UID: "6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.813862 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-config-data" (OuterVolumeSpecName: "config-data") pod "6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5" (UID: "6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.853981 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.854032 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gdnd5\" (UniqueName: \"kubernetes.io/projected/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-kube-api-access-gdnd5\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.854044 4910 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:00 crc kubenswrapper[4910]: I1125 21:47:00.854058 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.217121 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de327dc7-6e50-4eb8-bc62-bfe861d55d45" path="/var/lib/kubelet/pods/de327dc7-6e50-4eb8-bc62-bfe861d55d45/volumes" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.279152 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-r42pw" event={"ID":"6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5","Type":"ContainerDied","Data":"b1e22720212ffdbaf795c42eb631e0ef4efbd42af9c0eb4f8a27dbd1b945f16a"} Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.279553 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b1e22720212ffdbaf795c42eb631e0ef4efbd42af9c0eb4f8a27dbd1b945f16a" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.279400 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-r42pw" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.575454 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.798317 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-vwvcl"] Nov 25 21:47:01 crc kubenswrapper[4910]: E1125 21:47:01.798747 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5" containerName="glance-db-sync" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.798765 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5" containerName="glance-db-sync" Nov 25 21:47:01 crc kubenswrapper[4910]: E1125 21:47:01.798784 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de327dc7-6e50-4eb8-bc62-bfe861d55d45" containerName="init" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.798790 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="de327dc7-6e50-4eb8-bc62-bfe861d55d45" containerName="init" Nov 25 21:47:01 crc kubenswrapper[4910]: E1125 21:47:01.798802 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de327dc7-6e50-4eb8-bc62-bfe861d55d45" containerName="dnsmasq-dns" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.798807 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="de327dc7-6e50-4eb8-bc62-bfe861d55d45" containerName="dnsmasq-dns" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.798999 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="de327dc7-6e50-4eb8-bc62-bfe861d55d45" containerName="dnsmasq-dns" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.799030 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5" containerName="glance-db-sync" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.799923 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.830437 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-vwvcl"] Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.973804 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-svc\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.973891 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.973918 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-config\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.974073 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.974277 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:01 crc kubenswrapper[4910]: I1125 21:47:01.974308 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7hc7\" (UniqueName: \"kubernetes.io/projected/deaa9a59-2559-43c0-83fa-2380549d8c88-kube-api-access-b7hc7\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.076800 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.076878 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7hc7\" (UniqueName: \"kubernetes.io/projected/deaa9a59-2559-43c0-83fa-2380549d8c88-kube-api-access-b7hc7\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.076955 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-svc\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.077024 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.077044 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-config\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.077081 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.077833 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-sb\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.078015 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-nb\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.078321 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-svc\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.078385 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-swift-storage-0\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.078587 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-config\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.089172 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-jk49q"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.090323 4910 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/cinder-db-create-jk49q" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.110867 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-jk49q"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.116642 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7hc7\" (UniqueName: \"kubernetes.io/projected/deaa9a59-2559-43c0-83fa-2380549d8c88-kube-api-access-b7hc7\") pod \"dnsmasq-dns-895cf5cf-vwvcl\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.128016 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-24c5-account-create-update-nzg6c"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.128736 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.129795 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-24c5-account-create-update-nzg6c" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.134552 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.166546 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-24c5-account-create-update-nzg6c"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.249284 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-krdcz"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.251061 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-krdcz" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.251905 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-7fe9-account-create-update-gsm7z"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.260792 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-7fe9-account-create-update-gsm7z" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.268795 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.272789 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-krdcz"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.308341 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-operator-scripts\") pod \"barbican-24c5-account-create-update-nzg6c\" (UID: \"8eda8c66-7cc6-4516-9f62-d2e54ba7345e\") " pod="openstack/barbican-24c5-account-create-update-nzg6c" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.308413 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwqfb\" (UniqueName: \"kubernetes.io/projected/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-kube-api-access-dwqfb\") pod \"barbican-24c5-account-create-update-nzg6c\" (UID: \"8eda8c66-7cc6-4516-9f62-d2e54ba7345e\") " pod="openstack/barbican-24c5-account-create-update-nzg6c" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.308457 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-operator-scripts\") pod \"cinder-7fe9-account-create-update-gsm7z\" (UID: \"ac08d9d1-b92d-4e30-ae39-02e1d31b7545\") " pod="openstack/cinder-7fe9-account-create-update-gsm7z" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.308503 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cfe62699-06b4-4275-a302-15969eef2435-operator-scripts\") pod \"cinder-db-create-jk49q\" (UID: \"cfe62699-06b4-4275-a302-15969eef2435\") " pod="openstack/cinder-db-create-jk49q" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.308541 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngqjn\" (UniqueName: \"kubernetes.io/projected/cfe62699-06b4-4275-a302-15969eef2435-kube-api-access-ngqjn\") pod \"cinder-db-create-jk49q\" (UID: \"cfe62699-06b4-4275-a302-15969eef2435\") " pod="openstack/cinder-db-create-jk49q" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.333387 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-7fe9-account-create-update-gsm7z"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.410218 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngqjn\" (UniqueName: \"kubernetes.io/projected/cfe62699-06b4-4275-a302-15969eef2435-kube-api-access-ngqjn\") pod \"cinder-db-create-jk49q\" (UID: \"cfe62699-06b4-4275-a302-15969eef2435\") " pod="openstack/cinder-db-create-jk49q" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.410331 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da4b4c4-0881-45a6-b144-c65dd3c93740-operator-scripts\") pod \"barbican-db-create-krdcz\" (UID: \"3da4b4c4-0881-45a6-b144-c65dd3c93740\") " pod="openstack/barbican-db-create-krdcz" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 
21:47:02.410378 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6cxqd\" (UniqueName: \"kubernetes.io/projected/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-kube-api-access-6cxqd\") pod \"cinder-7fe9-account-create-update-gsm7z\" (UID: \"ac08d9d1-b92d-4e30-ae39-02e1d31b7545\") " pod="openstack/cinder-7fe9-account-create-update-gsm7z" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.410421 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gws44\" (UniqueName: \"kubernetes.io/projected/3da4b4c4-0881-45a6-b144-c65dd3c93740-kube-api-access-gws44\") pod \"barbican-db-create-krdcz\" (UID: \"3da4b4c4-0881-45a6-b144-c65dd3c93740\") " pod="openstack/barbican-db-create-krdcz" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.410452 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-operator-scripts\") pod \"barbican-24c5-account-create-update-nzg6c\" (UID: \"8eda8c66-7cc6-4516-9f62-d2e54ba7345e\") " pod="openstack/barbican-24c5-account-create-update-nzg6c" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.410473 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwqfb\" (UniqueName: \"kubernetes.io/projected/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-kube-api-access-dwqfb\") pod \"barbican-24c5-account-create-update-nzg6c\" (UID: \"8eda8c66-7cc6-4516-9f62-d2e54ba7345e\") " pod="openstack/barbican-24c5-account-create-update-nzg6c" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.410499 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-operator-scripts\") pod \"cinder-7fe9-account-create-update-gsm7z\" (UID: \"ac08d9d1-b92d-4e30-ae39-02e1d31b7545\") " pod="openstack/cinder-7fe9-account-create-update-gsm7z" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.410532 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cfe62699-06b4-4275-a302-15969eef2435-operator-scripts\") pod \"cinder-db-create-jk49q\" (UID: \"cfe62699-06b4-4275-a302-15969eef2435\") " pod="openstack/cinder-db-create-jk49q" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.414825 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cfe62699-06b4-4275-a302-15969eef2435-operator-scripts\") pod \"cinder-db-create-jk49q\" (UID: \"cfe62699-06b4-4275-a302-15969eef2435\") " pod="openstack/cinder-db-create-jk49q" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.425689 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-sq4pt"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.427599 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.431216 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-sq4pt"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.431691 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.431895 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.431986 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-v6h2m" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.432018 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngqjn\" (UniqueName: \"kubernetes.io/projected/cfe62699-06b4-4275-a302-15969eef2435-kube-api-access-ngqjn\") pod \"cinder-db-create-jk49q\" (UID: \"cfe62699-06b4-4275-a302-15969eef2435\") " pod="openstack/cinder-db-create-jk49q" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.431996 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.433768 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-operator-scripts\") pod \"cinder-7fe9-account-create-update-gsm7z\" (UID: \"ac08d9d1-b92d-4e30-ae39-02e1d31b7545\") " pod="openstack/cinder-7fe9-account-create-update-gsm7z" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.434168 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-operator-scripts\") pod \"barbican-24c5-account-create-update-nzg6c\" (UID: \"8eda8c66-7cc6-4516-9f62-d2e54ba7345e\") " pod="openstack/barbican-24c5-account-create-update-nzg6c" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.437777 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwqfb\" (UniqueName: \"kubernetes.io/projected/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-kube-api-access-dwqfb\") pod \"barbican-24c5-account-create-update-nzg6c\" (UID: \"8eda8c66-7cc6-4516-9f62-d2e54ba7345e\") " pod="openstack/barbican-24c5-account-create-update-nzg6c" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.512608 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da4b4c4-0881-45a6-b144-c65dd3c93740-operator-scripts\") pod \"barbican-db-create-krdcz\" (UID: \"3da4b4c4-0881-45a6-b144-c65dd3c93740\") " pod="openstack/barbican-db-create-krdcz" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.512697 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6cxqd\" (UniqueName: \"kubernetes.io/projected/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-kube-api-access-6cxqd\") pod \"cinder-7fe9-account-create-update-gsm7z\" (UID: \"ac08d9d1-b92d-4e30-ae39-02e1d31b7545\") " pod="openstack/cinder-7fe9-account-create-update-gsm7z" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.512751 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gws44\" (UniqueName: 
\"kubernetes.io/projected/3da4b4c4-0881-45a6-b144-c65dd3c93740-kube-api-access-gws44\") pod \"barbican-db-create-krdcz\" (UID: \"3da4b4c4-0881-45a6-b144-c65dd3c93740\") " pod="openstack/barbican-db-create-krdcz" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.513910 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da4b4c4-0881-45a6-b144-c65dd3c93740-operator-scripts\") pod \"barbican-db-create-krdcz\" (UID: \"3da4b4c4-0881-45a6-b144-c65dd3c93740\") " pod="openstack/barbican-db-create-krdcz" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.536203 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-4388-account-create-update-5w896"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.537005 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gws44\" (UniqueName: \"kubernetes.io/projected/3da4b4c4-0881-45a6-b144-c65dd3c93740-kube-api-access-gws44\") pod \"barbican-db-create-krdcz\" (UID: \"3da4b4c4-0881-45a6-b144-c65dd3c93740\") " pod="openstack/barbican-db-create-krdcz" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.538230 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4388-account-create-update-5w896" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.540257 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.542627 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6cxqd\" (UniqueName: \"kubernetes.io/projected/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-kube-api-access-6cxqd\") pod \"cinder-7fe9-account-create-update-gsm7z\" (UID: \"ac08d9d1-b92d-4e30-ae39-02e1d31b7545\") " pod="openstack/cinder-7fe9-account-create-update-gsm7z" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.549914 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-24c5-account-create-update-nzg6c" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.550703 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-v2wl2"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.554152 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-v2wl2" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.565655 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4388-account-create-update-5w896"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.572175 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-v2wl2"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.604086 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-krdcz" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.613795 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-config-data\") pod \"keystone-db-sync-sq4pt\" (UID: \"37737937-a670-4168-afca-ff5157233184\") " pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.613921 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-combined-ca-bundle\") pod \"keystone-db-sync-sq4pt\" (UID: \"37737937-a670-4168-afca-ff5157233184\") " pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.613963 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5gnd\" (UniqueName: \"kubernetes.io/projected/37737937-a670-4168-afca-ff5157233184-kube-api-access-p5gnd\") pod \"keystone-db-sync-sq4pt\" (UID: \"37737937-a670-4168-afca-ff5157233184\") " pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.630949 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-vwvcl"] Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.658795 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-7fe9-account-create-update-gsm7z" Nov 25 21:47:02 crc kubenswrapper[4910]: W1125 21:47:02.663994 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddeaa9a59_2559_43c0_83fa_2380549d8c88.slice/crio-ea94a411bceae6fa0faab97bcc8fc46ba2121a93ea4f99ff38489ef8d2ba4505 WatchSource:0}: Error finding container ea94a411bceae6fa0faab97bcc8fc46ba2121a93ea4f99ff38489ef8d2ba4505: Status 404 returned error can't find the container with id ea94a411bceae6fa0faab97bcc8fc46ba2121a93ea4f99ff38489ef8d2ba4505 Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.705228 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-jk49q" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.729638 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69a77984-0bf7-456c-97cc-f2db984fa1f6-operator-scripts\") pod \"neutron-db-create-v2wl2\" (UID: \"69a77984-0bf7-456c-97cc-f2db984fa1f6\") " pod="openstack/neutron-db-create-v2wl2" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.729694 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-config-data\") pod \"keystone-db-sync-sq4pt\" (UID: \"37737937-a670-4168-afca-ff5157233184\") " pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.729724 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51765f02-63d6-48ec-a2d9-6b12e042c76a-operator-scripts\") pod \"neutron-4388-account-create-update-5w896\" (UID: \"51765f02-63d6-48ec-a2d9-6b12e042c76a\") " pod="openstack/neutron-4388-account-create-update-5w896" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.729818 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7t2b9\" (UniqueName: \"kubernetes.io/projected/69a77984-0bf7-456c-97cc-f2db984fa1f6-kube-api-access-7t2b9\") pod \"neutron-db-create-v2wl2\" (UID: \"69a77984-0bf7-456c-97cc-f2db984fa1f6\") " pod="openstack/neutron-db-create-v2wl2" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.729842 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-combined-ca-bundle\") pod \"keystone-db-sync-sq4pt\" (UID: \"37737937-a670-4168-afca-ff5157233184\") " pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.729874 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d2wjq\" (UniqueName: \"kubernetes.io/projected/51765f02-63d6-48ec-a2d9-6b12e042c76a-kube-api-access-d2wjq\") pod \"neutron-4388-account-create-update-5w896\" (UID: \"51765f02-63d6-48ec-a2d9-6b12e042c76a\") " pod="openstack/neutron-4388-account-create-update-5w896" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.729898 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5gnd\" (UniqueName: \"kubernetes.io/projected/37737937-a670-4168-afca-ff5157233184-kube-api-access-p5gnd\") pod \"keystone-db-sync-sq4pt\" (UID: \"37737937-a670-4168-afca-ff5157233184\") " pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.737905 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-config-data\") pod \"keystone-db-sync-sq4pt\" (UID: \"37737937-a670-4168-afca-ff5157233184\") " pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.746616 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-combined-ca-bundle\") pod \"keystone-db-sync-sq4pt\" (UID: 
\"37737937-a670-4168-afca-ff5157233184\") " pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.748216 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5gnd\" (UniqueName: \"kubernetes.io/projected/37737937-a670-4168-afca-ff5157233184-kube-api-access-p5gnd\") pod \"keystone-db-sync-sq4pt\" (UID: \"37737937-a670-4168-afca-ff5157233184\") " pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.825626 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.832311 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7t2b9\" (UniqueName: \"kubernetes.io/projected/69a77984-0bf7-456c-97cc-f2db984fa1f6-kube-api-access-7t2b9\") pod \"neutron-db-create-v2wl2\" (UID: \"69a77984-0bf7-456c-97cc-f2db984fa1f6\") " pod="openstack/neutron-db-create-v2wl2" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.832367 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d2wjq\" (UniqueName: \"kubernetes.io/projected/51765f02-63d6-48ec-a2d9-6b12e042c76a-kube-api-access-d2wjq\") pod \"neutron-4388-account-create-update-5w896\" (UID: \"51765f02-63d6-48ec-a2d9-6b12e042c76a\") " pod="openstack/neutron-4388-account-create-update-5w896" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.832420 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69a77984-0bf7-456c-97cc-f2db984fa1f6-operator-scripts\") pod \"neutron-db-create-v2wl2\" (UID: \"69a77984-0bf7-456c-97cc-f2db984fa1f6\") " pod="openstack/neutron-db-create-v2wl2" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.832441 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51765f02-63d6-48ec-a2d9-6b12e042c76a-operator-scripts\") pod \"neutron-4388-account-create-update-5w896\" (UID: \"51765f02-63d6-48ec-a2d9-6b12e042c76a\") " pod="openstack/neutron-4388-account-create-update-5w896" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.833258 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51765f02-63d6-48ec-a2d9-6b12e042c76a-operator-scripts\") pod \"neutron-4388-account-create-update-5w896\" (UID: \"51765f02-63d6-48ec-a2d9-6b12e042c76a\") " pod="openstack/neutron-4388-account-create-update-5w896" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.834234 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69a77984-0bf7-456c-97cc-f2db984fa1f6-operator-scripts\") pod \"neutron-db-create-v2wl2\" (UID: \"69a77984-0bf7-456c-97cc-f2db984fa1f6\") " pod="openstack/neutron-db-create-v2wl2" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.856977 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7t2b9\" (UniqueName: \"kubernetes.io/projected/69a77984-0bf7-456c-97cc-f2db984fa1f6-kube-api-access-7t2b9\") pod \"neutron-db-create-v2wl2\" (UID: \"69a77984-0bf7-456c-97cc-f2db984fa1f6\") " pod="openstack/neutron-db-create-v2wl2" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.867866 4910 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-d2wjq\" (UniqueName: \"kubernetes.io/projected/51765f02-63d6-48ec-a2d9-6b12e042c76a-kube-api-access-d2wjq\") pod \"neutron-4388-account-create-update-5w896\" (UID: \"51765f02-63d6-48ec-a2d9-6b12e042c76a\") " pod="openstack/neutron-4388-account-create-update-5w896" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.882829 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4388-account-create-update-5w896" Nov 25 21:47:02 crc kubenswrapper[4910]: I1125 21:47:02.899848 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-v2wl2" Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.072440 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-24c5-account-create-update-nzg6c"] Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.225339 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-krdcz"] Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.355754 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-24c5-account-create-update-nzg6c" event={"ID":"8eda8c66-7cc6-4516-9f62-d2e54ba7345e","Type":"ContainerStarted","Data":"b87fb89779e2667e31e06e868fe627d18f97eb4c52b0e9419c1bfc7a53e67d0b"} Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.357545 4910 generic.go:334] "Generic (PLEG): container finished" podID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerID="f112bd96412c513d48213d2dabb77cf3dcaf1bd0d017092c6975663bbeb626b5" exitCode=0 Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.357599 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" event={"ID":"deaa9a59-2559-43c0-83fa-2380549d8c88","Type":"ContainerDied","Data":"f112bd96412c513d48213d2dabb77cf3dcaf1bd0d017092c6975663bbeb626b5"} Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.357621 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" event={"ID":"deaa9a59-2559-43c0-83fa-2380549d8c88","Type":"ContainerStarted","Data":"ea94a411bceae6fa0faab97bcc8fc46ba2121a93ea4f99ff38489ef8d2ba4505"} Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.364008 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-krdcz" event={"ID":"3da4b4c4-0881-45a6-b144-c65dd3c93740","Type":"ContainerStarted","Data":"094b0d38fe07a5b6c773b9770722d611ddeda4c5000bba86fd047ef918378e28"} Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.426137 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-jk49q"] Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.435205 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-7fe9-account-create-update-gsm7z"] Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.518672 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-sq4pt"] Nov 25 21:47:03 crc kubenswrapper[4910]: W1125 21:47:03.534148 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod37737937_a670_4168_afca_ff5157233184.slice/crio-a81f93a713946283d90ebcb30ddc24c71ae81eeb310ad6a75c2d730ec64264a7 WatchSource:0}: Error finding container a81f93a713946283d90ebcb30ddc24c71ae81eeb310ad6a75c2d730ec64264a7: Status 404 returned error can't find the container with id 
a81f93a713946283d90ebcb30ddc24c71ae81eeb310ad6a75c2d730ec64264a7 Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.642571 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-4388-account-create-update-5w896"] Nov 25 21:47:03 crc kubenswrapper[4910]: I1125 21:47:03.661055 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-v2wl2"] Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.403895 4910 generic.go:334] "Generic (PLEG): container finished" podID="ac08d9d1-b92d-4e30-ae39-02e1d31b7545" containerID="978b6581d3d12beb736377058bc6bc5cf10cc01a5f6690b2782b3dac3e96f341" exitCode=0 Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.424851 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-7fe9-account-create-update-gsm7z" event={"ID":"ac08d9d1-b92d-4e30-ae39-02e1d31b7545","Type":"ContainerDied","Data":"978b6581d3d12beb736377058bc6bc5cf10cc01a5f6690b2782b3dac3e96f341"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.424933 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-7fe9-account-create-update-gsm7z" event={"ID":"ac08d9d1-b92d-4e30-ae39-02e1d31b7545","Type":"ContainerStarted","Data":"edfeca7be3e97dd804ea042d99b57c0dcf7beff181b2c2469931859769d4ce37"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.454752 4910 generic.go:334] "Generic (PLEG): container finished" podID="51765f02-63d6-48ec-a2d9-6b12e042c76a" containerID="6827e820a8a940d03b264cc4a01017c07c40ada5bdf8281ce277b5fb7c13684a" exitCode=0 Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.454880 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4388-account-create-update-5w896" event={"ID":"51765f02-63d6-48ec-a2d9-6b12e042c76a","Type":"ContainerDied","Data":"6827e820a8a940d03b264cc4a01017c07c40ada5bdf8281ce277b5fb7c13684a"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.454915 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4388-account-create-update-5w896" event={"ID":"51765f02-63d6-48ec-a2d9-6b12e042c76a","Type":"ContainerStarted","Data":"ab0035115dc6a8d374a7df2ef73d4ec3183e3b384483fd8f6a7d942fc53cd754"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.518570 4910 generic.go:334] "Generic (PLEG): container finished" podID="69a77984-0bf7-456c-97cc-f2db984fa1f6" containerID="9bc0b65a04786da440d32b6d8ce6042ebc0f1304b0bf1a4c728c579c5922eb82" exitCode=0 Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.519071 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-v2wl2" event={"ID":"69a77984-0bf7-456c-97cc-f2db984fa1f6","Type":"ContainerDied","Data":"9bc0b65a04786da440d32b6d8ce6042ebc0f1304b0bf1a4c728c579c5922eb82"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.519172 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-v2wl2" event={"ID":"69a77984-0bf7-456c-97cc-f2db984fa1f6","Type":"ContainerStarted","Data":"66f020d2293254cfd1d18493566a92bcef4d210a7028cf72d5d07fb547c97f90"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.555311 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sq4pt" event={"ID":"37737937-a670-4168-afca-ff5157233184","Type":"ContainerStarted","Data":"a81f93a713946283d90ebcb30ddc24c71ae81eeb310ad6a75c2d730ec64264a7"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.590853 4910 generic.go:334] "Generic (PLEG): container finished" 
podID="8eda8c66-7cc6-4516-9f62-d2e54ba7345e" containerID="6bc31831bf45f15d586f1ccc78f2279150243f53b01bc0680e3e0fe25b0f8dd2" exitCode=0 Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.590978 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-24c5-account-create-update-nzg6c" event={"ID":"8eda8c66-7cc6-4516-9f62-d2e54ba7345e","Type":"ContainerDied","Data":"6bc31831bf45f15d586f1ccc78f2279150243f53b01bc0680e3e0fe25b0f8dd2"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.611566 4910 generic.go:334] "Generic (PLEG): container finished" podID="cfe62699-06b4-4275-a302-15969eef2435" containerID="7309a460941907cb78a18937c8ef9484ddc0d443e13f7732170e4dd471d50805" exitCode=0 Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.611648 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jk49q" event={"ID":"cfe62699-06b4-4275-a302-15969eef2435","Type":"ContainerDied","Data":"7309a460941907cb78a18937c8ef9484ddc0d443e13f7732170e4dd471d50805"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.611681 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jk49q" event={"ID":"cfe62699-06b4-4275-a302-15969eef2435","Type":"ContainerStarted","Data":"2c4ad2fef5f301e74fa157e28180f9a5c5d89e2ac3d69c57edec3da38855866d"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.622420 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" event={"ID":"deaa9a59-2559-43c0-83fa-2380549d8c88","Type":"ContainerStarted","Data":"ff6b5ed5ea80ff535de8c8d25f1a5a38d242dbd9d977c071b42adee5d842f27f"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.623594 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.632773 4910 generic.go:334] "Generic (PLEG): container finished" podID="3da4b4c4-0881-45a6-b144-c65dd3c93740" containerID="06cd7fd5468cb58b313eeedcfe1ca3e0e24865bcdd66699699dfdfebedc61723" exitCode=0 Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.632835 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-krdcz" event={"ID":"3da4b4c4-0881-45a6-b144-c65dd3c93740","Type":"ContainerDied","Data":"06cd7fd5468cb58b313eeedcfe1ca3e0e24865bcdd66699699dfdfebedc61723"} Nov 25 21:47:04 crc kubenswrapper[4910]: I1125 21:47:04.664604 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" podStartSLOduration=3.664588853 podStartE2EDuration="3.664588853s" podCreationTimestamp="2025-11-25 21:47:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:04.655709637 +0000 UTC m=+980.118185959" watchObservedRunningTime="2025-11-25 21:47:04.664588853 +0000 UTC m=+980.127065175" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.068360 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-7fe9-account-create-update-gsm7z" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.240192 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-operator-scripts\") pod \"ac08d9d1-b92d-4e30-ae39-02e1d31b7545\" (UID: \"ac08d9d1-b92d-4e30-ae39-02e1d31b7545\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.240340 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6cxqd\" (UniqueName: \"kubernetes.io/projected/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-kube-api-access-6cxqd\") pod \"ac08d9d1-b92d-4e30-ae39-02e1d31b7545\" (UID: \"ac08d9d1-b92d-4e30-ae39-02e1d31b7545\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.240876 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ac08d9d1-b92d-4e30-ae39-02e1d31b7545" (UID: "ac08d9d1-b92d-4e30-ae39-02e1d31b7545"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.241309 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.245938 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-kube-api-access-6cxqd" (OuterVolumeSpecName: "kube-api-access-6cxqd") pod "ac08d9d1-b92d-4e30-ae39-02e1d31b7545" (UID: "ac08d9d1-b92d-4e30-ae39-02e1d31b7545"). InnerVolumeSpecName "kube-api-access-6cxqd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.343345 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6cxqd\" (UniqueName: \"kubernetes.io/projected/ac08d9d1-b92d-4e30-ae39-02e1d31b7545-kube-api-access-6cxqd\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.361647 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-krdcz" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.364599 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-24c5-account-create-update-nzg6c" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.370788 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-v2wl2" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.383742 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-jk49q" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.390494 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-4388-account-create-update-5w896" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.546008 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwqfb\" (UniqueName: \"kubernetes.io/projected/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-kube-api-access-dwqfb\") pod \"8eda8c66-7cc6-4516-9f62-d2e54ba7345e\" (UID: \"8eda8c66-7cc6-4516-9f62-d2e54ba7345e\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.546149 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da4b4c4-0881-45a6-b144-c65dd3c93740-operator-scripts\") pod \"3da4b4c4-0881-45a6-b144-c65dd3c93740\" (UID: \"3da4b4c4-0881-45a6-b144-c65dd3c93740\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.546220 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gws44\" (UniqueName: \"kubernetes.io/projected/3da4b4c4-0881-45a6-b144-c65dd3c93740-kube-api-access-gws44\") pod \"3da4b4c4-0881-45a6-b144-c65dd3c93740\" (UID: \"3da4b4c4-0881-45a6-b144-c65dd3c93740\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.546270 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7t2b9\" (UniqueName: \"kubernetes.io/projected/69a77984-0bf7-456c-97cc-f2db984fa1f6-kube-api-access-7t2b9\") pod \"69a77984-0bf7-456c-97cc-f2db984fa1f6\" (UID: \"69a77984-0bf7-456c-97cc-f2db984fa1f6\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.546290 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngqjn\" (UniqueName: \"kubernetes.io/projected/cfe62699-06b4-4275-a302-15969eef2435-kube-api-access-ngqjn\") pod \"cfe62699-06b4-4275-a302-15969eef2435\" (UID: \"cfe62699-06b4-4275-a302-15969eef2435\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.546308 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51765f02-63d6-48ec-a2d9-6b12e042c76a-operator-scripts\") pod \"51765f02-63d6-48ec-a2d9-6b12e042c76a\" (UID: \"51765f02-63d6-48ec-a2d9-6b12e042c76a\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.546337 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-operator-scripts\") pod \"8eda8c66-7cc6-4516-9f62-d2e54ba7345e\" (UID: \"8eda8c66-7cc6-4516-9f62-d2e54ba7345e\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.546361 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cfe62699-06b4-4275-a302-15969eef2435-operator-scripts\") pod \"cfe62699-06b4-4275-a302-15969eef2435\" (UID: \"cfe62699-06b4-4275-a302-15969eef2435\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.546388 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/69a77984-0bf7-456c-97cc-f2db984fa1f6-operator-scripts\") pod \"69a77984-0bf7-456c-97cc-f2db984fa1f6\" (UID: \"69a77984-0bf7-456c-97cc-f2db984fa1f6\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.546407 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d2wjq\" (UniqueName: 
\"kubernetes.io/projected/51765f02-63d6-48ec-a2d9-6b12e042c76a-kube-api-access-d2wjq\") pod \"51765f02-63d6-48ec-a2d9-6b12e042c76a\" (UID: \"51765f02-63d6-48ec-a2d9-6b12e042c76a\") " Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.547424 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/51765f02-63d6-48ec-a2d9-6b12e042c76a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "51765f02-63d6-48ec-a2d9-6b12e042c76a" (UID: "51765f02-63d6-48ec-a2d9-6b12e042c76a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.548141 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfe62699-06b4-4275-a302-15969eef2435-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "cfe62699-06b4-4275-a302-15969eef2435" (UID: "cfe62699-06b4-4275-a302-15969eef2435"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.548364 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8eda8c66-7cc6-4516-9f62-d2e54ba7345e" (UID: "8eda8c66-7cc6-4516-9f62-d2e54ba7345e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.548531 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3da4b4c4-0881-45a6-b144-c65dd3c93740-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3da4b4c4-0881-45a6-b144-c65dd3c93740" (UID: "3da4b4c4-0881-45a6-b144-c65dd3c93740"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.548915 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/69a77984-0bf7-456c-97cc-f2db984fa1f6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "69a77984-0bf7-456c-97cc-f2db984fa1f6" (UID: "69a77984-0bf7-456c-97cc-f2db984fa1f6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.551744 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3da4b4c4-0881-45a6-b144-c65dd3c93740-kube-api-access-gws44" (OuterVolumeSpecName: "kube-api-access-gws44") pod "3da4b4c4-0881-45a6-b144-c65dd3c93740" (UID: "3da4b4c4-0881-45a6-b144-c65dd3c93740"). InnerVolumeSpecName "kube-api-access-gws44". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.552168 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51765f02-63d6-48ec-a2d9-6b12e042c76a-kube-api-access-d2wjq" (OuterVolumeSpecName: "kube-api-access-d2wjq") pod "51765f02-63d6-48ec-a2d9-6b12e042c76a" (UID: "51765f02-63d6-48ec-a2d9-6b12e042c76a"). InnerVolumeSpecName "kube-api-access-d2wjq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.552733 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-kube-api-access-dwqfb" (OuterVolumeSpecName: "kube-api-access-dwqfb") pod "8eda8c66-7cc6-4516-9f62-d2e54ba7345e" (UID: "8eda8c66-7cc6-4516-9f62-d2e54ba7345e"). InnerVolumeSpecName "kube-api-access-dwqfb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.553733 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69a77984-0bf7-456c-97cc-f2db984fa1f6-kube-api-access-7t2b9" (OuterVolumeSpecName: "kube-api-access-7t2b9") pod "69a77984-0bf7-456c-97cc-f2db984fa1f6" (UID: "69a77984-0bf7-456c-97cc-f2db984fa1f6"). InnerVolumeSpecName "kube-api-access-7t2b9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.575755 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfe62699-06b4-4275-a302-15969eef2435-kube-api-access-ngqjn" (OuterVolumeSpecName: "kube-api-access-ngqjn") pod "cfe62699-06b4-4275-a302-15969eef2435" (UID: "cfe62699-06b4-4275-a302-15969eef2435"). InnerVolumeSpecName "kube-api-access-ngqjn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.650441 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3da4b4c4-0881-45a6-b144-c65dd3c93740-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.650483 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gws44\" (UniqueName: \"kubernetes.io/projected/3da4b4c4-0881-45a6-b144-c65dd3c93740-kube-api-access-gws44\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.650495 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7t2b9\" (UniqueName: \"kubernetes.io/projected/69a77984-0bf7-456c-97cc-f2db984fa1f6-kube-api-access-7t2b9\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.650505 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngqjn\" (UniqueName: \"kubernetes.io/projected/cfe62699-06b4-4275-a302-15969eef2435-kube-api-access-ngqjn\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.650520 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/51765f02-63d6-48ec-a2d9-6b12e042c76a-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.650558 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.650569 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/cfe62699-06b4-4275-a302-15969eef2435-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.650583 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/69a77984-0bf7-456c-97cc-f2db984fa1f6-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.650596 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d2wjq\" (UniqueName: \"kubernetes.io/projected/51765f02-63d6-48ec-a2d9-6b12e042c76a-kube-api-access-d2wjq\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.650609 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwqfb\" (UniqueName: \"kubernetes.io/projected/8eda8c66-7cc6-4516-9f62-d2e54ba7345e-kube-api-access-dwqfb\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.663509 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-4388-account-create-update-5w896" event={"ID":"51765f02-63d6-48ec-a2d9-6b12e042c76a","Type":"ContainerDied","Data":"ab0035115dc6a8d374a7df2ef73d4ec3183e3b384483fd8f6a7d942fc53cd754"} Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.663564 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ab0035115dc6a8d374a7df2ef73d4ec3183e3b384483fd8f6a7d942fc53cd754" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.663629 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-4388-account-create-update-5w896" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.665821 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-v2wl2" event={"ID":"69a77984-0bf7-456c-97cc-f2db984fa1f6","Type":"ContainerDied","Data":"66f020d2293254cfd1d18493566a92bcef4d210a7028cf72d5d07fb547c97f90"} Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.665859 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66f020d2293254cfd1d18493566a92bcef4d210a7028cf72d5d07fb547c97f90" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.666042 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-v2wl2" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.671134 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-24c5-account-create-update-nzg6c" event={"ID":"8eda8c66-7cc6-4516-9f62-d2e54ba7345e","Type":"ContainerDied","Data":"b87fb89779e2667e31e06e868fe627d18f97eb4c52b0e9419c1bfc7a53e67d0b"} Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.671162 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b87fb89779e2667e31e06e868fe627d18f97eb4c52b0e9419c1bfc7a53e67d0b" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.671227 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-24c5-account-create-update-nzg6c" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.685712 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-jk49q" event={"ID":"cfe62699-06b4-4275-a302-15969eef2435","Type":"ContainerDied","Data":"2c4ad2fef5f301e74fa157e28180f9a5c5d89e2ac3d69c57edec3da38855866d"} Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.685754 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c4ad2fef5f301e74fa157e28180f9a5c5d89e2ac3d69c57edec3da38855866d" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.685808 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-jk49q" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.691593 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-krdcz" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.691584 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-krdcz" event={"ID":"3da4b4c4-0881-45a6-b144-c65dd3c93740","Type":"ContainerDied","Data":"094b0d38fe07a5b6c773b9770722d611ddeda4c5000bba86fd047ef918378e28"} Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.691733 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="094b0d38fe07a5b6c773b9770722d611ddeda4c5000bba86fd047ef918378e28" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.693345 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-7fe9-account-create-update-gsm7z" Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.693409 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-7fe9-account-create-update-gsm7z" event={"ID":"ac08d9d1-b92d-4e30-ae39-02e1d31b7545","Type":"ContainerDied","Data":"edfeca7be3e97dd804ea042d99b57c0dcf7beff181b2c2469931859769d4ce37"} Nov 25 21:47:06 crc kubenswrapper[4910]: I1125 21:47:06.693497 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="edfeca7be3e97dd804ea042d99b57c0dcf7beff181b2c2469931859769d4ce37" Nov 25 21:47:10 crc kubenswrapper[4910]: I1125 21:47:10.737132 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sq4pt" event={"ID":"37737937-a670-4168-afca-ff5157233184","Type":"ContainerStarted","Data":"f0e7f65e6221131bc0c882d017ba379345fdf9bb179ce7b58e924d329b1dbb62"} Nov 25 21:47:10 crc kubenswrapper[4910]: I1125 21:47:10.769826 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-sq4pt" podStartSLOduration=2.403863614 podStartE2EDuration="8.769807078s" podCreationTimestamp="2025-11-25 21:47:02 +0000 UTC" firstStartedPulling="2025-11-25 21:47:03.540102067 +0000 UTC m=+979.002578389" lastFinishedPulling="2025-11-25 21:47:09.906045521 +0000 UTC m=+985.368521853" observedRunningTime="2025-11-25 21:47:10.761875918 +0000 UTC m=+986.224352250" watchObservedRunningTime="2025-11-25 21:47:10.769807078 +0000 UTC m=+986.232283400" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.130898 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.198276 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-8hktq"] Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.198655 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" podUID="d9af0ae9-a945-4d74-8472-702a9bef15b0" containerName="dnsmasq-dns" containerID="cri-o://57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25" gracePeriod=10 Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.724809 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.758769 4910 generic.go:334] "Generic (PLEG): container finished" podID="d9af0ae9-a945-4d74-8472-702a9bef15b0" containerID="57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25" exitCode=0 Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.758820 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" event={"ID":"d9af0ae9-a945-4d74-8472-702a9bef15b0","Type":"ContainerDied","Data":"57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25"} Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.758836 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.758860 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6d5b6d6b67-8hktq" event={"ID":"d9af0ae9-a945-4d74-8472-702a9bef15b0","Type":"ContainerDied","Data":"1c3724778be98a858449e7aef6a0a5748e8932fe08bbe5b5f917862fa59f2245"} Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.758884 4910 scope.go:117] "RemoveContainer" containerID="57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.784489 4910 scope.go:117] "RemoveContainer" containerID="dd70cc8fd201df03477356224bd1566c8f851a8633e4d3f0b1adf5989a88f048" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.834689 4910 scope.go:117] "RemoveContainer" containerID="57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25" Nov 25 21:47:12 crc kubenswrapper[4910]: E1125 21:47:12.835153 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25\": container with ID starting with 57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25 not found: ID does not exist" containerID="57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.835198 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25"} err="failed to get container status \"57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25\": rpc error: code = NotFound desc = could not find container \"57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25\": container with ID starting with 57c481379ce1facdd61feadf014328651b918085c9aad69f389601f34685fe25 not found: ID does not exist" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.835230 4910 scope.go:117] "RemoveContainer" containerID="dd70cc8fd201df03477356224bd1566c8f851a8633e4d3f0b1adf5989a88f048" Nov 25 21:47:12 crc kubenswrapper[4910]: E1125 21:47:12.841824 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd70cc8fd201df03477356224bd1566c8f851a8633e4d3f0b1adf5989a88f048\": container with ID starting with dd70cc8fd201df03477356224bd1566c8f851a8633e4d3f0b1adf5989a88f048 not found: ID does not exist" containerID="dd70cc8fd201df03477356224bd1566c8f851a8633e4d3f0b1adf5989a88f048" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.841871 4910 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"dd70cc8fd201df03477356224bd1566c8f851a8633e4d3f0b1adf5989a88f048"} err="failed to get container status \"dd70cc8fd201df03477356224bd1566c8f851a8633e4d3f0b1adf5989a88f048\": rpc error: code = NotFound desc = could not find container \"dd70cc8fd201df03477356224bd1566c8f851a8633e4d3f0b1adf5989a88f048\": container with ID starting with dd70cc8fd201df03477356224bd1566c8f851a8633e4d3f0b1adf5989a88f048 not found: ID does not exist" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.865003 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-svc\") pod \"d9af0ae9-a945-4d74-8472-702a9bef15b0\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.865096 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-swift-storage-0\") pod \"d9af0ae9-a945-4d74-8472-702a9bef15b0\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.865117 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-nb\") pod \"d9af0ae9-a945-4d74-8472-702a9bef15b0\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.865139 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t8pmg\" (UniqueName: \"kubernetes.io/projected/d9af0ae9-a945-4d74-8472-702a9bef15b0-kube-api-access-t8pmg\") pod \"d9af0ae9-a945-4d74-8472-702a9bef15b0\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.865182 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-sb\") pod \"d9af0ae9-a945-4d74-8472-702a9bef15b0\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.865260 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-config\") pod \"d9af0ae9-a945-4d74-8472-702a9bef15b0\" (UID: \"d9af0ae9-a945-4d74-8472-702a9bef15b0\") " Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.872525 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9af0ae9-a945-4d74-8472-702a9bef15b0-kube-api-access-t8pmg" (OuterVolumeSpecName: "kube-api-access-t8pmg") pod "d9af0ae9-a945-4d74-8472-702a9bef15b0" (UID: "d9af0ae9-a945-4d74-8472-702a9bef15b0"). InnerVolumeSpecName "kube-api-access-t8pmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.916850 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d9af0ae9-a945-4d74-8472-702a9bef15b0" (UID: "d9af0ae9-a945-4d74-8472-702a9bef15b0"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.917139 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d9af0ae9-a945-4d74-8472-702a9bef15b0" (UID: "d9af0ae9-a945-4d74-8472-702a9bef15b0"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.920147 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d9af0ae9-a945-4d74-8472-702a9bef15b0" (UID: "d9af0ae9-a945-4d74-8472-702a9bef15b0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.935992 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d9af0ae9-a945-4d74-8472-702a9bef15b0" (UID: "d9af0ae9-a945-4d74-8472-702a9bef15b0"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.950864 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-config" (OuterVolumeSpecName: "config") pod "d9af0ae9-a945-4d74-8472-702a9bef15b0" (UID: "d9af0ae9-a945-4d74-8472-702a9bef15b0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.966764 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.966795 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.966809 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.966819 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t8pmg\" (UniqueName: \"kubernetes.io/projected/d9af0ae9-a945-4d74-8472-702a9bef15b0-kube-api-access-t8pmg\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.966828 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:12 crc kubenswrapper[4910]: I1125 21:47:12.966839 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d9af0ae9-a945-4d74-8472-702a9bef15b0-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:13 crc kubenswrapper[4910]: I1125 21:47:13.091324 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-6d5b6d6b67-8hktq"] Nov 25 21:47:13 crc kubenswrapper[4910]: I1125 21:47:13.098105 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6d5b6d6b67-8hktq"] Nov 25 21:47:13 crc kubenswrapper[4910]: I1125 21:47:13.218991 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9af0ae9-a945-4d74-8472-702a9bef15b0" path="/var/lib/kubelet/pods/d9af0ae9-a945-4d74-8472-702a9bef15b0/volumes" Nov 25 21:47:13 crc kubenswrapper[4910]: I1125 21:47:13.772779 4910 generic.go:334] "Generic (PLEG): container finished" podID="37737937-a670-4168-afca-ff5157233184" containerID="f0e7f65e6221131bc0c882d017ba379345fdf9bb179ce7b58e924d329b1dbb62" exitCode=0 Nov 25 21:47:13 crc kubenswrapper[4910]: I1125 21:47:13.772840 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sq4pt" event={"ID":"37737937-a670-4168-afca-ff5157233184","Type":"ContainerDied","Data":"f0e7f65e6221131bc0c882d017ba379345fdf9bb179ce7b58e924d329b1dbb62"} Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.172007 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.309835 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-combined-ca-bundle\") pod \"37737937-a670-4168-afca-ff5157233184\" (UID: \"37737937-a670-4168-afca-ff5157233184\") " Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.309903 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5gnd\" (UniqueName: \"kubernetes.io/projected/37737937-a670-4168-afca-ff5157233184-kube-api-access-p5gnd\") pod \"37737937-a670-4168-afca-ff5157233184\" (UID: \"37737937-a670-4168-afca-ff5157233184\") " Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.309933 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-config-data\") pod \"37737937-a670-4168-afca-ff5157233184\" (UID: \"37737937-a670-4168-afca-ff5157233184\") " Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.318973 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37737937-a670-4168-afca-ff5157233184-kube-api-access-p5gnd" (OuterVolumeSpecName: "kube-api-access-p5gnd") pod "37737937-a670-4168-afca-ff5157233184" (UID: "37737937-a670-4168-afca-ff5157233184"). InnerVolumeSpecName "kube-api-access-p5gnd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.339449 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37737937-a670-4168-afca-ff5157233184" (UID: "37737937-a670-4168-afca-ff5157233184"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.381073 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-config-data" (OuterVolumeSpecName: "config-data") pod "37737937-a670-4168-afca-ff5157233184" (UID: "37737937-a670-4168-afca-ff5157233184"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.413201 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.413323 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5gnd\" (UniqueName: \"kubernetes.io/projected/37737937-a670-4168-afca-ff5157233184-kube-api-access-p5gnd\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.413352 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37737937-a670-4168-afca-ff5157233184-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.793276 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-sq4pt" event={"ID":"37737937-a670-4168-afca-ff5157233184","Type":"ContainerDied","Data":"a81f93a713946283d90ebcb30ddc24c71ae81eeb310ad6a75c2d730ec64264a7"} Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.793319 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a81f93a713946283d90ebcb30ddc24c71ae81eeb310ad6a75c2d730ec64264a7" Nov 25 21:47:15 crc kubenswrapper[4910]: I1125 21:47:15.793326 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-sq4pt" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.059151 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-74bbs"] Nov 25 21:47:16 crc kubenswrapper[4910]: E1125 21:47:16.059747 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8eda8c66-7cc6-4516-9f62-d2e54ba7345e" containerName="mariadb-account-create-update" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.059759 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="8eda8c66-7cc6-4516-9f62-d2e54ba7345e" containerName="mariadb-account-create-update" Nov 25 21:47:16 crc kubenswrapper[4910]: E1125 21:47:16.059771 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3da4b4c4-0881-45a6-b144-c65dd3c93740" containerName="mariadb-database-create" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.059777 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3da4b4c4-0881-45a6-b144-c65dd3c93740" containerName="mariadb-database-create" Nov 25 21:47:16 crc kubenswrapper[4910]: E1125 21:47:16.059786 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfe62699-06b4-4275-a302-15969eef2435" containerName="mariadb-database-create" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.059794 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfe62699-06b4-4275-a302-15969eef2435" containerName="mariadb-database-create" Nov 25 21:47:16 crc kubenswrapper[4910]: E1125 21:47:16.059817 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac08d9d1-b92d-4e30-ae39-02e1d31b7545" containerName="mariadb-account-create-update" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.059826 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac08d9d1-b92d-4e30-ae39-02e1d31b7545" containerName="mariadb-account-create-update" Nov 25 21:47:16 crc kubenswrapper[4910]: E1125 21:47:16.059861 4910 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="69a77984-0bf7-456c-97cc-f2db984fa1f6" containerName="mariadb-database-create" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.059868 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="69a77984-0bf7-456c-97cc-f2db984fa1f6" containerName="mariadb-database-create" Nov 25 21:47:16 crc kubenswrapper[4910]: E1125 21:47:16.059879 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51765f02-63d6-48ec-a2d9-6b12e042c76a" containerName="mariadb-account-create-update" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.059885 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="51765f02-63d6-48ec-a2d9-6b12e042c76a" containerName="mariadb-account-create-update" Nov 25 21:47:16 crc kubenswrapper[4910]: E1125 21:47:16.059907 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9af0ae9-a945-4d74-8472-702a9bef15b0" containerName="init" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.059912 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9af0ae9-a945-4d74-8472-702a9bef15b0" containerName="init" Nov 25 21:47:16 crc kubenswrapper[4910]: E1125 21:47:16.059922 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37737937-a670-4168-afca-ff5157233184" containerName="keystone-db-sync" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.059928 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="37737937-a670-4168-afca-ff5157233184" containerName="keystone-db-sync" Nov 25 21:47:16 crc kubenswrapper[4910]: E1125 21:47:16.059937 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9af0ae9-a945-4d74-8472-702a9bef15b0" containerName="dnsmasq-dns" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.059943 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9af0ae9-a945-4d74-8472-702a9bef15b0" containerName="dnsmasq-dns" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.060088 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="51765f02-63d6-48ec-a2d9-6b12e042c76a" containerName="mariadb-account-create-update" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.060101 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9af0ae9-a945-4d74-8472-702a9bef15b0" containerName="dnsmasq-dns" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.060110 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="69a77984-0bf7-456c-97cc-f2db984fa1f6" containerName="mariadb-database-create" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.060122 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="8eda8c66-7cc6-4516-9f62-d2e54ba7345e" containerName="mariadb-account-create-update" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.060133 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3da4b4c4-0881-45a6-b144-c65dd3c93740" containerName="mariadb-database-create" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.060143 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac08d9d1-b92d-4e30-ae39-02e1d31b7545" containerName="mariadb-account-create-update" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.060157 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="37737937-a670-4168-afca-ff5157233184" containerName="keystone-db-sync" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.060167 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfe62699-06b4-4275-a302-15969eef2435" 
containerName="mariadb-database-create" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.060959 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.109101 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-74bbs"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.120868 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-lhjdg"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.126691 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.132100 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.132504 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.133380 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.134694 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-v6h2m" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.141641 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.223670 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lhjdg"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236424 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-config\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236517 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5t2cg\" (UniqueName: \"kubernetes.io/projected/2666f85f-8913-4931-80d7-42daa0c5749d-kube-api-access-5t2cg\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236550 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236574 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-config-data\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236596 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-fernet-keys\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236638 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236688 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-scripts\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236723 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236749 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236770 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-combined-ca-bundle\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236796 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-credential-keys\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.236819 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kt8mr\" (UniqueName: \"kubernetes.io/projected/ba30a43d-f3c2-4319-8260-a0431d9a734c-kube-api-access-kt8mr\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.271010 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-79b94b5d95-k97q6"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.272820 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.280775 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.280896 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-kvzdl" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.281083 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.281972 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.320822 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-79b94b5d95-k97q6"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339442 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-scripts\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339530 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339560 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-combined-ca-bundle\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339577 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339597 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-credential-keys\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339615 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kt8mr\" (UniqueName: \"kubernetes.io/projected/ba30a43d-f3c2-4319-8260-a0431d9a734c-kube-api-access-kt8mr\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339666 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-config\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " 
pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339708 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5t2cg\" (UniqueName: \"kubernetes.io/projected/2666f85f-8913-4931-80d7-42daa0c5749d-kube-api-access-5t2cg\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339729 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339751 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-config-data\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339769 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-fernet-keys\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.339800 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.341047 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-nb\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.342394 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-config\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.343196 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-sb\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.345494 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-xx5nj"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.348350 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-swift-storage-0\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: 
\"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.349075 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-svc\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.349682 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.354484 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.354508 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-combined-ca-bundle\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.355271 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-config-data\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.357993 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-scripts\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.369838 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.370064 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-ctxhm" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.379935 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-credential-keys\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.380401 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-fernet-keys\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.388307 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-xx5nj"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.397994 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kt8mr\" (UniqueName: \"kubernetes.io/projected/ba30a43d-f3c2-4319-8260-a0431d9a734c-kube-api-access-kt8mr\") pod \"keystone-bootstrap-lhjdg\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 
21:47:16.407150 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.410135 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.413813 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.414000 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.414169 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5t2cg\" (UniqueName: \"kubernetes.io/projected/2666f85f-8913-4931-80d7-42daa0c5749d-kube-api-access-5t2cg\") pod \"dnsmasq-dns-6c9c9f998c-74bbs\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.431822 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-dvs77"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.433189 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.442088 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-config-data\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.442175 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-scripts\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.442233 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6r8dw\" (UniqueName: \"kubernetes.io/projected/a49af42e-3a80-4f2a-9b4b-f43946a32c49-kube-api-access-6r8dw\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.442282 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-scripts\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.442323 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntfr4\" (UniqueName: \"kubernetes.io/projected/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-kube-api-access-ntfr4\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.442369 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a49af42e-3a80-4f2a-9b4b-f43946a32c49-logs\") pod 
\"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.442399 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-combined-ca-bundle\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.442436 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a49af42e-3a80-4f2a-9b4b-f43946a32c49-horizon-secret-key\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.442479 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-config-data\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.442499 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-etc-machine-id\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.442524 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-db-sync-config-data\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.443159 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-2s7pb" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.443592 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.443871 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.447635 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.470111 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-dvs77"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.503918 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.537125 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-74bbs"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.545085 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-combined-ca-bundle\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.553541 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-config-data\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.553587 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-scripts\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.553624 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a49af42e-3a80-4f2a-9b4b-f43946a32c49-horizon-secret-key\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.553872 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-config-data\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.553893 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-etc-machine-id\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.553920 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-db-sync-config-data\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.553960 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-config-data\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.553978 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzmb9\" (UniqueName: \"kubernetes.io/projected/19f21ae5-3e49-410e-a481-00e837d94c6c-kube-api-access-mzmb9\") pod \"neutron-db-sync-dvs77\" (UID: 
\"19f21ae5-3e49-410e-a481-00e837d94c6c\") " pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554018 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554059 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-scripts\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554078 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-run-httpd\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554108 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554151 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-combined-ca-bundle\") pod \"neutron-db-sync-dvs77\" (UID: \"19f21ae5-3e49-410e-a481-00e837d94c6c\") " pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554177 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-log-httpd\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554309 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6r8dw\" (UniqueName: \"kubernetes.io/projected/a49af42e-3a80-4f2a-9b4b-f43946a32c49-kube-api-access-6r8dw\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554376 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzqdt\" (UniqueName: \"kubernetes.io/projected/65f64c1b-8090-4f51-9a93-46a36ff28baa-kube-api-access-bzqdt\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554430 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-scripts\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554453 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntfr4\" (UniqueName: \"kubernetes.io/projected/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-kube-api-access-ntfr4\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554487 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-config\") pod \"neutron-db-sync-dvs77\" (UID: \"19f21ae5-3e49-410e-a481-00e837d94c6c\") " pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.554579 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a49af42e-3a80-4f2a-9b4b-f43946a32c49-logs\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.555278 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a49af42e-3a80-4f2a-9b4b-f43946a32c49-logs\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.556401 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-config-data\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.556455 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-etc-machine-id\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.558603 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a49af42e-3a80-4f2a-9b4b-f43946a32c49-horizon-secret-key\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.558759 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-scripts\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.564013 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.572131 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-combined-ca-bundle\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.572361 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-scripts\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.587330 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-db-sync-config-data\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.588018 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-config-data\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.602582 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6r8dw\" (UniqueName: \"kubernetes.io/projected/a49af42e-3a80-4f2a-9b4b-f43946a32c49-kube-api-access-6r8dw\") pod \"horizon-79b94b5d95-k97q6\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.606887 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntfr4\" (UniqueName: \"kubernetes.io/projected/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-kube-api-access-ntfr4\") pod \"cinder-db-sync-xx5nj\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.623570 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-6tggk"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.625113 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.648028 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-djprh"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.649138 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.662212 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.662412 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.662485 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-qtfh4" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.663461 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzqdt\" (UniqueName: \"kubernetes.io/projected/65f64c1b-8090-4f51-9a93-46a36ff28baa-kube-api-access-bzqdt\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.663526 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-config\") pod \"neutron-db-sync-dvs77\" (UID: \"19f21ae5-3e49-410e-a481-00e837d94c6c\") " pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.663587 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-config-data\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.663608 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-scripts\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.663646 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzmb9\" (UniqueName: \"kubernetes.io/projected/19f21ae5-3e49-410e-a481-00e837d94c6c-kube-api-access-mzmb9\") pod \"neutron-db-sync-dvs77\" (UID: \"19f21ae5-3e49-410e-a481-00e837d94c6c\") " pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.663668 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.663691 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-run-httpd\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.663708 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.663727 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-combined-ca-bundle\") pod \"neutron-db-sync-dvs77\" (UID: \"19f21ae5-3e49-410e-a481-00e837d94c6c\") " pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.663742 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-log-httpd\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.664207 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-log-httpd\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.667622 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-run-httpd\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.669910 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-t82b5"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.671363 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-t82b5" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.673670 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.675567 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-combined-ca-bundle\") pod \"neutron-db-sync-dvs77\" (UID: \"19f21ae5-3e49-410e-a481-00e837d94c6c\") " pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.675850 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.676040 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-gpzm4" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.682594 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-config\") pod \"neutron-db-sync-dvs77\" (UID: \"19f21ae5-3e49-410e-a481-00e837d94c6c\") " pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.682647 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-djprh"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.688906 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzqdt\" (UniqueName: \"kubernetes.io/projected/65f64c1b-8090-4f51-9a93-46a36ff28baa-kube-api-access-bzqdt\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " 
pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.689075 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-config-data\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.704366 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-scripts\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.704431 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-6tggk"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.708758 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.714858 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzmb9\" (UniqueName: \"kubernetes.io/projected/19f21ae5-3e49-410e-a481-00e837d94c6c-kube-api-access-mzmb9\") pod \"neutron-db-sync-dvs77\" (UID: \"19f21ae5-3e49-410e-a481-00e837d94c6c\") " pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.716404 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-t82b5"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.731421 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-589cccf487-rmm6g"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.736726 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.739890 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-589cccf487-rmm6g"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.754668 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.756546 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.761324 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.761490 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-8nw9f" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.761618 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.761746 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.766920 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-combined-ca-bundle\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.766967 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k4mpl\" (UniqueName: \"kubernetes.io/projected/909bc667-1a51-44ef-b676-dabab2050b4e-kube-api-access-k4mpl\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.766996 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-config\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.767039 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzn45\" (UniqueName: \"kubernetes.io/projected/7e650049-c8bd-4a60-a1f7-1b022752ff7a-kube-api-access-rzn45\") pod \"barbican-db-sync-t82b5\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " pod="openstack/barbican-db-sync-t82b5" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.767091 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.767143 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-db-sync-config-data\") pod \"barbican-db-sync-t82b5\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " pod="openstack/barbican-db-sync-t82b5" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.767162 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " 
pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.767189 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.767215 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.767259 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-scripts\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.767284 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h6kj4\" (UniqueName: \"kubernetes.io/projected/40229997-b3af-4531-8bf1-e8ac2aed63e5-kube-api-access-h6kj4\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.767334 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-combined-ca-bundle\") pod \"barbican-db-sync-t82b5\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " pod="openstack/barbican-db-sync-t82b5" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.767354 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/909bc667-1a51-44ef-b676-dabab2050b4e-logs\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.767395 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-config-data\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.780748 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.784784 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.816626 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.875824 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.875884 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-scripts\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.875913 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h6kj4\" (UniqueName: \"kubernetes.io/projected/40229997-b3af-4531-8bf1-e8ac2aed63e5-kube-api-access-h6kj4\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.875950 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.875969 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.875991 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/44202bfc-d64a-46ce-a8a7-d49c68691337-horizon-secret-key\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876012 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-combined-ca-bundle\") pod \"barbican-db-sync-t82b5\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " pod="openstack/barbican-db-sync-t82b5" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876031 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/909bc667-1a51-44ef-b676-dabab2050b4e-logs\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876059 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44202bfc-d64a-46ce-a8a7-d49c68691337-logs\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" 
Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876088 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-config-data\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876109 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-config-data\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876130 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnbd2\" (UniqueName: \"kubernetes.io/projected/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-kube-api-access-hnbd2\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876168 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-combined-ca-bundle\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876191 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k4mpl\" (UniqueName: \"kubernetes.io/projected/909bc667-1a51-44ef-b676-dabab2050b4e-kube-api-access-k4mpl\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876211 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-scripts\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876238 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-config\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876290 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-config-data\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876309 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 
21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876332 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzn45\" (UniqueName: \"kubernetes.io/projected/7e650049-c8bd-4a60-a1f7-1b022752ff7a-kube-api-access-rzn45\") pod \"barbican-db-sync-t82b5\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " pod="openstack/barbican-db-sync-t82b5" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876354 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876416 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876443 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-scripts\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876469 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74rx7\" (UniqueName: \"kubernetes.io/projected/44202bfc-d64a-46ce-a8a7-d49c68691337-kube-api-access-74rx7\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876509 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-db-sync-config-data\") pod \"barbican-db-sync-t82b5\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " pod="openstack/barbican-db-sync-t82b5" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876536 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876573 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-logs\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.876596 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 
21:47:16.880889 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-swift-storage-0\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.884528 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/909bc667-1a51-44ef-b676-dabab2050b4e-logs\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.886742 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-combined-ca-bundle\") pod \"barbican-db-sync-t82b5\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " pod="openstack/barbican-db-sync-t82b5" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.887726 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-config\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.887724 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-combined-ca-bundle\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.888354 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-sb\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.888523 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-nb\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.893148 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-scripts\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.895828 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.896172 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-svc\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.907028 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-config-data\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.915607 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h6kj4\" (UniqueName: \"kubernetes.io/projected/40229997-b3af-4531-8bf1-e8ac2aed63e5-kube-api-access-h6kj4\") pod \"dnsmasq-dns-57c957c4ff-6tggk\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.943346 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzn45\" (UniqueName: \"kubernetes.io/projected/7e650049-c8bd-4a60-a1f7-1b022752ff7a-kube-api-access-rzn45\") pod \"barbican-db-sync-t82b5\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " pod="openstack/barbican-db-sync-t82b5" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.943778 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k4mpl\" (UniqueName: \"kubernetes.io/projected/909bc667-1a51-44ef-b676-dabab2050b4e-kube-api-access-k4mpl\") pod \"placement-db-sync-djprh\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " pod="openstack/placement-db-sync-djprh" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.964765 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-db-sync-config-data\") pod \"barbican-db-sync-t82b5\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " pod="openstack/barbican-db-sync-t82b5" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.968821 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.986552 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-logs\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.986713 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.986748 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.986770 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/44202bfc-d64a-46ce-a8a7-d49c68691337-horizon-secret-key\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.986826 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44202bfc-d64a-46ce-a8a7-d49c68691337-logs\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.986856 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-config-data\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.986892 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnbd2\" (UniqueName: \"kubernetes.io/projected/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-kube-api-access-hnbd2\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.986984 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-scripts\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.987031 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-config-data\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.987063 
4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.987088 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.987181 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-scripts\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.987222 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74rx7\" (UniqueName: \"kubernetes.io/projected/44202bfc-d64a-46ce-a8a7-d49c68691337-kube-api-access-74rx7\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.988135 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-logs\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.988393 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:16 crc kubenswrapper[4910]: I1125 21:47:16.990738 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44202bfc-d64a-46ce-a8a7-d49c68691337-logs\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:16.999144 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.010715 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-scripts\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.021772 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-config-data\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.024533 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-djprh" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.029732 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.035614 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74rx7\" (UniqueName: \"kubernetes.io/projected/44202bfc-d64a-46ce-a8a7-d49c68691337-kube-api-access-74rx7\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.036148 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.037774 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnbd2\" (UniqueName: \"kubernetes.io/projected/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-kube-api-access-hnbd2\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.041908 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-scripts\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.042506 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.042976 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-t82b5" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.054724 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/44202bfc-d64a-46ce-a8a7-d49c68691337-horizon-secret-key\") pod \"horizon-589cccf487-rmm6g\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.064053 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-config-data\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.086961 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.092524 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.117883 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.314432 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-74bbs"] Nov 25 21:47:17 crc kubenswrapper[4910]: W1125 21:47:17.408864 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2666f85f_8913_4931_80d7_42daa0c5749d.slice/crio-f4fc37fb5b330e53e3dcd6d5d4f1db95210ef6263469e557bf09f44a17a58177 WatchSource:0}: Error finding container f4fc37fb5b330e53e3dcd6d5d4f1db95210ef6263469e557bf09f44a17a58177: Status 404 returned error can't find the container with id f4fc37fb5b330e53e3dcd6d5d4f1db95210ef6263469e557bf09f44a17a58177 Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.444311 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.445905 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.453679 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.453830 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.462758 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.511570 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-lhjdg"] Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.614050 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-logs\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.614130 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.614172 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48fff\" (UniqueName: \"kubernetes.io/projected/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-kube-api-access-48fff\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " 
pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.614197 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.614234 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.614274 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.614351 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.614475 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.647086 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-xx5nj"] Nov 25 21:47:17 crc kubenswrapper[4910]: W1125 21:47:17.664675 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7c1bc8b_7026_4f6a_8a1b_fee1aa94f0a4.slice/crio-26581e806db35109974db675455d7197163b70988b19ca60d0967fba2494f66f WatchSource:0}: Error finding container 26581e806db35109974db675455d7197163b70988b19ca60d0967fba2494f66f: Status 404 returned error can't find the container with id 26581e806db35109974db675455d7197163b70988b19ca60d0967fba2494f66f Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.719396 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.719471 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 
21:47:17.719600 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-logs\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.719660 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.719698 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48fff\" (UniqueName: \"kubernetes.io/projected/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-kube-api-access-48fff\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.719730 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.719763 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.719807 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.720855 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-logs\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.721308 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.724207 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.729151 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.732608 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.735410 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.742745 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.751563 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48fff\" (UniqueName: \"kubernetes.io/projected/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-kube-api-access-48fff\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:17 crc kubenswrapper[4910]: I1125 21:47:17.807510 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:17.861208 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-xx5nj" event={"ID":"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4","Type":"ContainerStarted","Data":"26581e806db35109974db675455d7197163b70988b19ca60d0967fba2494f66f"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:17.867312 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" event={"ID":"2666f85f-8913-4931-80d7-42daa0c5749d","Type":"ContainerStarted","Data":"f4fc37fb5b330e53e3dcd6d5d4f1db95210ef6263469e557bf09f44a17a58177"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:17.866012 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" podUID="2666f85f-8913-4931-80d7-42daa0c5749d" containerName="init" containerID="cri-o://d629670c87a7adec83db92398b4462b7cceadc09476ec6d5fd88de0ff751c9e9" gracePeriod=10 Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:17.870049 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lhjdg" event={"ID":"ba30a43d-f3c2-4319-8260-a0431d9a734c","Type":"ContainerStarted","Data":"c2b383a344323887789b37ab1ade3d6b7d630eb66584d7c6a720be64319d6368"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:17.880332 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/ceilometer-0"] Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:17.922200 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-lhjdg" podStartSLOduration=1.922178078 podStartE2EDuration="1.922178078s" podCreationTimestamp="2025-11-25 21:47:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:17.913818716 +0000 UTC m=+993.376295058" watchObservedRunningTime="2025-11-25 21:47:17.922178078 +0000 UTC m=+993.384654400" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:17.952284 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.053995 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-dvs77"] Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.069332 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-djprh"] Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.081403 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-79b94b5d95-k97q6"] Nov 25 21:47:18 crc kubenswrapper[4910]: W1125 21:47:18.098546 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda49af42e_3a80_4f2a_9b4b_f43946a32c49.slice/crio-dbee99a9efca638025b0159da0cf4330d32f82a9b0d671f132609b3c69329022 WatchSource:0}: Error finding container dbee99a9efca638025b0159da0cf4330d32f82a9b0d671f132609b3c69329022: Status 404 returned error can't find the container with id dbee99a9efca638025b0159da0cf4330d32f82a9b0d671f132609b3c69329022 Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.531668 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.591103 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-589cccf487-rmm6g"] Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.628568 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.648178 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-847c69868c-h5mpg"] Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.650273 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.691312 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-847c69868c-h5mpg"] Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.765132 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-config-data\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.765189 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-logs\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.765228 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-horizon-secret-key\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.765299 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-scripts\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.765337 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2mmg\" (UniqueName: \"kubernetes.io/projected/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-kube-api-access-j2mmg\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.777319 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.822400 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-t82b5"] Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.844103 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-6tggk"] Nov 25 21:47:18 crc kubenswrapper[4910]: W1125 21:47:18.860274 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod44202bfc_d64a_46ce_a8a7_d49c68691337.slice/crio-ee0ea256ea04c33b2504a7beaaa56bb380fcc53692d90f662a7c096eeb0d0280 WatchSource:0}: Error finding container ee0ea256ea04c33b2504a7beaaa56bb380fcc53692d90f662a7c096eeb0d0280: Status 404 returned error can't find the container with id ee0ea256ea04c33b2504a7beaaa56bb380fcc53692d90f662a7c096eeb0d0280 Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.866796 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-scripts\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " 
pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.866864 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2mmg\" (UniqueName: \"kubernetes.io/projected/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-kube-api-access-j2mmg\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.866949 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-config-data\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.866978 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-logs\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.867004 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-horizon-secret-key\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.868385 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-589cccf487-rmm6g"] Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.868926 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-scripts\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.869366 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-logs\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.870713 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-config-data\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.877613 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-horizon-secret-key\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.894815 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2mmg\" (UniqueName: \"kubernetes.io/projected/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-kube-api-access-j2mmg\") pod \"horizon-847c69868c-h5mpg\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " 
pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.919582 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-t82b5" event={"ID":"7e650049-c8bd-4a60-a1f7-1b022752ff7a","Type":"ContainerStarted","Data":"6035c8238dc642073c017d425714504c618bf109be08dce5c5f3b312f8204e8f"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.925914 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-589cccf487-rmm6g" event={"ID":"44202bfc-d64a-46ce-a8a7-d49c68691337","Type":"ContainerStarted","Data":"ee0ea256ea04c33b2504a7beaaa56bb380fcc53692d90f662a7c096eeb0d0280"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.936019 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-djprh" event={"ID":"909bc667-1a51-44ef-b676-dabab2050b4e","Type":"ContainerStarted","Data":"f39d8cb7e5f17559c0212e05269e4f5a1686e37e1bfdd6dec5baff21224ce0db"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.938008 4910 generic.go:334] "Generic (PLEG): container finished" podID="2666f85f-8913-4931-80d7-42daa0c5749d" containerID="d629670c87a7adec83db92398b4462b7cceadc09476ec6d5fd88de0ff751c9e9" exitCode=0 Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.938114 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" event={"ID":"2666f85f-8913-4931-80d7-42daa0c5749d","Type":"ContainerDied","Data":"d629670c87a7adec83db92398b4462b7cceadc09476ec6d5fd88de0ff751c9e9"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.940980 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" event={"ID":"40229997-b3af-4531-8bf1-e8ac2aed63e5","Type":"ContainerStarted","Data":"9a81f9ea3be2a0abcbbebfbe89135e951029c32f2dba34caa916a45b9f79d28d"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.944184 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-dvs77" event={"ID":"19f21ae5-3e49-410e-a481-00e837d94c6c","Type":"ContainerStarted","Data":"7714ec33692b75cb39718e60df64e30bbed48f36216d945b74baf39db7e0d990"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.944207 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-dvs77" event={"ID":"19f21ae5-3e49-410e-a481-00e837d94c6c","Type":"ContainerStarted","Data":"c8ebe66188495cc72805dadca951796879215f710136a4738f0f45b6287337a6"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.947663 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79b94b5d95-k97q6" event={"ID":"a49af42e-3a80-4f2a-9b4b-f43946a32c49","Type":"ContainerStarted","Data":"dbee99a9efca638025b0159da0cf4330d32f82a9b0d671f132609b3c69329022"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.954148 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65f64c1b-8090-4f51-9a93-46a36ff28baa","Type":"ContainerStarted","Data":"d14f3de7bca5a06177ef6458ea84c385817edc1674f9318cd8689701116e7014"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.960651 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lhjdg" event={"ID":"ba30a43d-f3c2-4319-8260-a0431d9a734c","Type":"ContainerStarted","Data":"f3d49d6ebcb61f7bf1289baffc3034331ccbfda0ff3a34702beed6e0c21595be"} Nov 25 21:47:18 crc kubenswrapper[4910]: I1125 21:47:18.976687 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/glance-default-external-api-0"] Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.003791 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.036955 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-dvs77" podStartSLOduration=3.036931335 podStartE2EDuration="3.036931335s" podCreationTimestamp="2025-11-25 21:47:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:18.967567215 +0000 UTC m=+994.430043537" watchObservedRunningTime="2025-11-25 21:47:19.036931335 +0000 UTC m=+994.499407657" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.092686 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.173087 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-svc\") pod \"2666f85f-8913-4931-80d7-42daa0c5749d\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.173201 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5t2cg\" (UniqueName: \"kubernetes.io/projected/2666f85f-8913-4931-80d7-42daa0c5749d-kube-api-access-5t2cg\") pod \"2666f85f-8913-4931-80d7-42daa0c5749d\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.173285 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-nb\") pod \"2666f85f-8913-4931-80d7-42daa0c5749d\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.173405 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-swift-storage-0\") pod \"2666f85f-8913-4931-80d7-42daa0c5749d\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.173480 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-sb\") pod \"2666f85f-8913-4931-80d7-42daa0c5749d\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.173522 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-config\") pod \"2666f85f-8913-4931-80d7-42daa0c5749d\" (UID: \"2666f85f-8913-4931-80d7-42daa0c5749d\") " Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.178320 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2666f85f-8913-4931-80d7-42daa0c5749d-kube-api-access-5t2cg" (OuterVolumeSpecName: "kube-api-access-5t2cg") pod "2666f85f-8913-4931-80d7-42daa0c5749d" (UID: "2666f85f-8913-4931-80d7-42daa0c5749d"). InnerVolumeSpecName "kube-api-access-5t2cg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.192592 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.204062 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2666f85f-8913-4931-80d7-42daa0c5749d" (UID: "2666f85f-8913-4931-80d7-42daa0c5749d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.212812 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2666f85f-8913-4931-80d7-42daa0c5749d" (UID: "2666f85f-8913-4931-80d7-42daa0c5749d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.213572 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-config" (OuterVolumeSpecName: "config") pod "2666f85f-8913-4931-80d7-42daa0c5749d" (UID: "2666f85f-8913-4931-80d7-42daa0c5749d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.236596 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2666f85f-8913-4931-80d7-42daa0c5749d" (UID: "2666f85f-8913-4931-80d7-42daa0c5749d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.238861 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2666f85f-8913-4931-80d7-42daa0c5749d" (UID: "2666f85f-8913-4931-80d7-42daa0c5749d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.276432 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.277783 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5t2cg\" (UniqueName: \"kubernetes.io/projected/2666f85f-8913-4931-80d7-42daa0c5749d-kube-api-access-5t2cg\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.277810 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.277822 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.277832 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.277844 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2666f85f-8913-4931-80d7-42daa0c5749d-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:19 crc kubenswrapper[4910]: I1125 21:47:19.632291 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-847c69868c-h5mpg"] Nov 25 21:47:20 crc kubenswrapper[4910]: I1125 21:47:20.023089 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" event={"ID":"2666f85f-8913-4931-80d7-42daa0c5749d","Type":"ContainerDied","Data":"f4fc37fb5b330e53e3dcd6d5d4f1db95210ef6263469e557bf09f44a17a58177"} Nov 25 21:47:20 crc kubenswrapper[4910]: I1125 21:47:20.023171 4910 scope.go:117] "RemoveContainer" containerID="d629670c87a7adec83db92398b4462b7cceadc09476ec6d5fd88de0ff751c9e9" Nov 25 21:47:20 crc kubenswrapper[4910]: I1125 21:47:20.023369 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6c9c9f998c-74bbs" Nov 25 21:47:20 crc kubenswrapper[4910]: I1125 21:47:20.078230 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-847c69868c-h5mpg" event={"ID":"472df0ce-39e1-4ccb-b92b-1ff7f7d88152","Type":"ContainerStarted","Data":"1353d090030bbf160b6536120f54efb6fcf69d88f3893bb21a45c97c2f3f2ad0"} Nov 25 21:47:20 crc kubenswrapper[4910]: I1125 21:47:20.088311 4910 generic.go:334] "Generic (PLEG): container finished" podID="40229997-b3af-4531-8bf1-e8ac2aed63e5" containerID="ec9056a732dbeffbfa763919006df073383e68dd1ed5e99330932a01d55e57ee" exitCode=0 Nov 25 21:47:20 crc kubenswrapper[4910]: I1125 21:47:20.088391 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" event={"ID":"40229997-b3af-4531-8bf1-e8ac2aed63e5","Type":"ContainerDied","Data":"ec9056a732dbeffbfa763919006df073383e68dd1ed5e99330932a01d55e57ee"} Nov 25 21:47:20 crc kubenswrapper[4910]: I1125 21:47:20.109948 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda9cfce-4f37-4d0e-b8dc-c22089efdf63","Type":"ContainerStarted","Data":"631da099618c79e34901ed3a7a1dc202fb56603eb404dd7cb7b4ac060b28f562"} Nov 25 21:47:20 crc kubenswrapper[4910]: I1125 21:47:20.129672 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"57d0c3a5-eb4d-44f5-a569-3ff2a572747d","Type":"ContainerStarted","Data":"e7ddcc585b3737d450fc9a081113cdcd62be32df7bf7341e83ef2d7f822c122d"} Nov 25 21:47:20 crc kubenswrapper[4910]: I1125 21:47:20.136888 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-74bbs"] Nov 25 21:47:20 crc kubenswrapper[4910]: I1125 21:47:20.214326 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6c9c9f998c-74bbs"] Nov 25 21:47:21 crc kubenswrapper[4910]: I1125 21:47:21.152266 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" event={"ID":"40229997-b3af-4531-8bf1-e8ac2aed63e5","Type":"ContainerStarted","Data":"b250e040972a20cdcbe57b4900dbadb24a5b68db30164a5e1d34cccae2015294"} Nov 25 21:47:21 crc kubenswrapper[4910]: I1125 21:47:21.154009 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:21 crc kubenswrapper[4910]: I1125 21:47:21.168014 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda9cfce-4f37-4d0e-b8dc-c22089efdf63","Type":"ContainerStarted","Data":"29a1abeaa06439b42eca14b24d892b39296234c9ae60f90fa83bf6b986afe872"} Nov 25 21:47:21 crc kubenswrapper[4910]: I1125 21:47:21.186638 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" podStartSLOduration=5.18661721 podStartE2EDuration="5.18661721s" podCreationTimestamp="2025-11-25 21:47:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:21.180018665 +0000 UTC m=+996.642495007" watchObservedRunningTime="2025-11-25 21:47:21.18661721 +0000 UTC m=+996.649093532" Nov 25 21:47:21 crc kubenswrapper[4910]: I1125 21:47:21.195014 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" 
event={"ID":"57d0c3a5-eb4d-44f5-a569-3ff2a572747d","Type":"ContainerStarted","Data":"4c6ad8d9c0a968d1bf4ee65a4b4c7268ee61ae53d1b154dae0e96e12ce12bdfb"} Nov 25 21:47:21 crc kubenswrapper[4910]: I1125 21:47:21.230026 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2666f85f-8913-4931-80d7-42daa0c5749d" path="/var/lib/kubelet/pods/2666f85f-8913-4931-80d7-42daa0c5749d/volumes" Nov 25 21:47:22 crc kubenswrapper[4910]: I1125 21:47:22.222270 4910 generic.go:334] "Generic (PLEG): container finished" podID="ba30a43d-f3c2-4319-8260-a0431d9a734c" containerID="f3d49d6ebcb61f7bf1289baffc3034331ccbfda0ff3a34702beed6e0c21595be" exitCode=0 Nov 25 21:47:22 crc kubenswrapper[4910]: I1125 21:47:22.222359 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lhjdg" event={"ID":"ba30a43d-f3c2-4319-8260-a0431d9a734c","Type":"ContainerDied","Data":"f3d49d6ebcb61f7bf1289baffc3034331ccbfda0ff3a34702beed6e0c21595be"} Nov 25 21:47:22 crc kubenswrapper[4910]: I1125 21:47:22.234521 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fda9cfce-4f37-4d0e-b8dc-c22089efdf63" containerName="glance-log" containerID="cri-o://29a1abeaa06439b42eca14b24d892b39296234c9ae60f90fa83bf6b986afe872" gracePeriod=30 Nov 25 21:47:22 crc kubenswrapper[4910]: I1125 21:47:22.234777 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda9cfce-4f37-4d0e-b8dc-c22089efdf63","Type":"ContainerStarted","Data":"9dc95c3a36dc1eadff6576f1d0a7aeded840fd7720b1004f579afc06879ad43c"} Nov 25 21:47:22 crc kubenswrapper[4910]: I1125 21:47:22.234803 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fda9cfce-4f37-4d0e-b8dc-c22089efdf63" containerName="glance-httpd" containerID="cri-o://9dc95c3a36dc1eadff6576f1d0a7aeded840fd7720b1004f579afc06879ad43c" gracePeriod=30 Nov 25 21:47:22 crc kubenswrapper[4910]: I1125 21:47:22.243345 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="57d0c3a5-eb4d-44f5-a569-3ff2a572747d" containerName="glance-log" containerID="cri-o://4c6ad8d9c0a968d1bf4ee65a4b4c7268ee61ae53d1b154dae0e96e12ce12bdfb" gracePeriod=30 Nov 25 21:47:22 crc kubenswrapper[4910]: I1125 21:47:22.243569 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="57d0c3a5-eb4d-44f5-a569-3ff2a572747d" containerName="glance-httpd" containerID="cri-o://4d74ba63c358e0edac56b5cb81d5b8a0ce66f53bc25cb0011ef862bcc562d0fc" gracePeriod=30 Nov 25 21:47:22 crc kubenswrapper[4910]: I1125 21:47:22.243836 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"57d0c3a5-eb4d-44f5-a569-3ff2a572747d","Type":"ContainerStarted","Data":"4d74ba63c358e0edac56b5cb81d5b8a0ce66f53bc25cb0011ef862bcc562d0fc"} Nov 25 21:47:22 crc kubenswrapper[4910]: I1125 21:47:22.286036 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.28601549 podStartE2EDuration="6.28601549s" podCreationTimestamp="2025-11-25 21:47:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:22.269539992 +0000 UTC m=+997.732016314" 
watchObservedRunningTime="2025-11-25 21:47:22.28601549 +0000 UTC m=+997.748491812" Nov 25 21:47:22 crc kubenswrapper[4910]: I1125 21:47:22.298384 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.298365137 podStartE2EDuration="6.298365137s" podCreationTimestamp="2025-11-25 21:47:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:22.295140432 +0000 UTC m=+997.757616764" watchObservedRunningTime="2025-11-25 21:47:22.298365137 +0000 UTC m=+997.760841459" Nov 25 21:47:23 crc kubenswrapper[4910]: I1125 21:47:23.259018 4910 generic.go:334] "Generic (PLEG): container finished" podID="57d0c3a5-eb4d-44f5-a569-3ff2a572747d" containerID="4d74ba63c358e0edac56b5cb81d5b8a0ce66f53bc25cb0011ef862bcc562d0fc" exitCode=0 Nov 25 21:47:23 crc kubenswrapper[4910]: I1125 21:47:23.259064 4910 generic.go:334] "Generic (PLEG): container finished" podID="57d0c3a5-eb4d-44f5-a569-3ff2a572747d" containerID="4c6ad8d9c0a968d1bf4ee65a4b4c7268ee61ae53d1b154dae0e96e12ce12bdfb" exitCode=143 Nov 25 21:47:23 crc kubenswrapper[4910]: I1125 21:47:23.259104 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"57d0c3a5-eb4d-44f5-a569-3ff2a572747d","Type":"ContainerDied","Data":"4d74ba63c358e0edac56b5cb81d5b8a0ce66f53bc25cb0011ef862bcc562d0fc"} Nov 25 21:47:23 crc kubenswrapper[4910]: I1125 21:47:23.259160 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"57d0c3a5-eb4d-44f5-a569-3ff2a572747d","Type":"ContainerDied","Data":"4c6ad8d9c0a968d1bf4ee65a4b4c7268ee61ae53d1b154dae0e96e12ce12bdfb"} Nov 25 21:47:23 crc kubenswrapper[4910]: I1125 21:47:23.262890 4910 generic.go:334] "Generic (PLEG): container finished" podID="fda9cfce-4f37-4d0e-b8dc-c22089efdf63" containerID="9dc95c3a36dc1eadff6576f1d0a7aeded840fd7720b1004f579afc06879ad43c" exitCode=0 Nov 25 21:47:23 crc kubenswrapper[4910]: I1125 21:47:23.262924 4910 generic.go:334] "Generic (PLEG): container finished" podID="fda9cfce-4f37-4d0e-b8dc-c22089efdf63" containerID="29a1abeaa06439b42eca14b24d892b39296234c9ae60f90fa83bf6b986afe872" exitCode=143 Nov 25 21:47:23 crc kubenswrapper[4910]: I1125 21:47:23.264055 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda9cfce-4f37-4d0e-b8dc-c22089efdf63","Type":"ContainerDied","Data":"9dc95c3a36dc1eadff6576f1d0a7aeded840fd7720b1004f579afc06879ad43c"} Nov 25 21:47:23 crc kubenswrapper[4910]: I1125 21:47:23.264082 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda9cfce-4f37-4d0e-b8dc-c22089efdf63","Type":"ContainerDied","Data":"29a1abeaa06439b42eca14b24d892b39296234c9ae60f90fa83bf6b986afe872"} Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.001052 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-79b94b5d95-k97q6"] Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.027328 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-58f8d7cc56-csk7l"] Nov 25 21:47:25 crc kubenswrapper[4910]: E1125 21:47:25.027786 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2666f85f-8913-4931-80d7-42daa0c5749d" containerName="init" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.027801 4910 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="2666f85f-8913-4931-80d7-42daa0c5749d" containerName="init" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.027975 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2666f85f-8913-4931-80d7-42daa0c5749d" containerName="init" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.030129 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.032903 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.059152 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58f8d7cc56-csk7l"] Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.109659 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-847c69868c-h5mpg"] Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.146164 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-657976db8d-swkbt"] Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.146279 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-secret-key\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.146331 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-scripts\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.146365 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-config-data\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.146494 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-combined-ca-bundle\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.146544 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/78dc494b-f987-443a-a350-1988639b6fee-logs\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.146647 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vpgsz\" (UniqueName: \"kubernetes.io/projected/78dc494b-f987-443a-a350-1988639b6fee-kube-api-access-vpgsz\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.146777 4910 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-tls-certs\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.147809 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.165157 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-657976db8d-swkbt"] Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.249147 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7941e190-b648-4b11-946b-dddaa1bc98d9-logs\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.249211 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-secret-key\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.249239 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7941e190-b648-4b11-946b-dddaa1bc98d9-horizon-tls-certs\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.249297 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-scripts\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.249378 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-config-data\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.249590 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtr8l\" (UniqueName: \"kubernetes.io/projected/7941e190-b648-4b11-946b-dddaa1bc98d9-kube-api-access-wtr8l\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.249733 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7941e190-b648-4b11-946b-dddaa1bc98d9-combined-ca-bundle\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.249926 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-combined-ca-bundle\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.250011 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7941e190-b648-4b11-946b-dddaa1bc98d9-horizon-secret-key\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.250105 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/78dc494b-f987-443a-a350-1988639b6fee-logs\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.250190 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vpgsz\" (UniqueName: \"kubernetes.io/projected/78dc494b-f987-443a-a350-1988639b6fee-kube-api-access-vpgsz\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.250271 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-tls-certs\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.250326 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7941e190-b648-4b11-946b-dddaa1bc98d9-config-data\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.250421 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7941e190-b648-4b11-946b-dddaa1bc98d9-scripts\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.250440 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-scripts\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.250989 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-config-data\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.251025 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/78dc494b-f987-443a-a350-1988639b6fee-logs\") pod \"horizon-58f8d7cc56-csk7l\" 
(UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.255217 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-secret-key\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.255354 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-combined-ca-bundle\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.255448 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-tls-certs\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.275713 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vpgsz\" (UniqueName: \"kubernetes.io/projected/78dc494b-f987-443a-a350-1988639b6fee-kube-api-access-vpgsz\") pod \"horizon-58f8d7cc56-csk7l\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") " pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.351188 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.357573 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7941e190-b648-4b11-946b-dddaa1bc98d9-scripts\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.363840 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/7941e190-b648-4b11-946b-dddaa1bc98d9-scripts\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.367117 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7941e190-b648-4b11-946b-dddaa1bc98d9-logs\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.367190 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7941e190-b648-4b11-946b-dddaa1bc98d9-horizon-tls-certs\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.367317 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtr8l\" (UniqueName: \"kubernetes.io/projected/7941e190-b648-4b11-946b-dddaa1bc98d9-kube-api-access-wtr8l\") pod 
\"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.367407 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7941e190-b648-4b11-946b-dddaa1bc98d9-combined-ca-bundle\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.367584 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7941e190-b648-4b11-946b-dddaa1bc98d9-horizon-secret-key\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.367757 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7941e190-b648-4b11-946b-dddaa1bc98d9-config-data\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.368510 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7941e190-b648-4b11-946b-dddaa1bc98d9-logs\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.370696 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7941e190-b648-4b11-946b-dddaa1bc98d9-config-data\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.371443 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/7941e190-b648-4b11-946b-dddaa1bc98d9-horizon-secret-key\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.372008 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/7941e190-b648-4b11-946b-dddaa1bc98d9-horizon-tls-certs\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.374329 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7941e190-b648-4b11-946b-dddaa1bc98d9-combined-ca-bundle\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 21:47:25.394618 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtr8l\" (UniqueName: \"kubernetes.io/projected/7941e190-b648-4b11-946b-dddaa1bc98d9-kube-api-access-wtr8l\") pod \"horizon-657976db8d-swkbt\" (UID: \"7941e190-b648-4b11-946b-dddaa1bc98d9\") " pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:25 crc kubenswrapper[4910]: I1125 
21:47:25.497020 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.218947 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.284413 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-config-data\") pod \"ba30a43d-f3c2-4319-8260-a0431d9a734c\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.284613 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-combined-ca-bundle\") pod \"ba30a43d-f3c2-4319-8260-a0431d9a734c\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.284736 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-credential-keys\") pod \"ba30a43d-f3c2-4319-8260-a0431d9a734c\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.284782 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kt8mr\" (UniqueName: \"kubernetes.io/projected/ba30a43d-f3c2-4319-8260-a0431d9a734c-kube-api-access-kt8mr\") pod \"ba30a43d-f3c2-4319-8260-a0431d9a734c\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.284811 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-scripts\") pod \"ba30a43d-f3c2-4319-8260-a0431d9a734c\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.284873 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-fernet-keys\") pod \"ba30a43d-f3c2-4319-8260-a0431d9a734c\" (UID: \"ba30a43d-f3c2-4319-8260-a0431d9a734c\") " Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.295742 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba30a43d-f3c2-4319-8260-a0431d9a734c-kube-api-access-kt8mr" (OuterVolumeSpecName: "kube-api-access-kt8mr") pod "ba30a43d-f3c2-4319-8260-a0431d9a734c" (UID: "ba30a43d-f3c2-4319-8260-a0431d9a734c"). InnerVolumeSpecName "kube-api-access-kt8mr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.296122 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-scripts" (OuterVolumeSpecName: "scripts") pod "ba30a43d-f3c2-4319-8260-a0431d9a734c" (UID: "ba30a43d-f3c2-4319-8260-a0431d9a734c"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.305032 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "ba30a43d-f3c2-4319-8260-a0431d9a734c" (UID: "ba30a43d-f3c2-4319-8260-a0431d9a734c"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.312673 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ba30a43d-f3c2-4319-8260-a0431d9a734c" (UID: "ba30a43d-f3c2-4319-8260-a0431d9a734c"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.321936 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-lhjdg" event={"ID":"ba30a43d-f3c2-4319-8260-a0431d9a734c","Type":"ContainerDied","Data":"c2b383a344323887789b37ab1ade3d6b7d630eb66584d7c6a720be64319d6368"} Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.321978 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2b383a344323887789b37ab1ade3d6b7d630eb66584d7c6a720be64319d6368" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.322040 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-lhjdg" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.331473 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ba30a43d-f3c2-4319-8260-a0431d9a734c" (UID: "ba30a43d-f3c2-4319-8260-a0431d9a734c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.342914 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-config-data" (OuterVolumeSpecName: "config-data") pod "ba30a43d-f3c2-4319-8260-a0431d9a734c" (UID: "ba30a43d-f3c2-4319-8260-a0431d9a734c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.387894 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.387928 4910 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.387938 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kt8mr\" (UniqueName: \"kubernetes.io/projected/ba30a43d-f3c2-4319-8260-a0431d9a734c-kube-api-access-kt8mr\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.387948 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.387957 4910 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:26 crc kubenswrapper[4910]: I1125 21:47:26.387965 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ba30a43d-f3c2-4319-8260-a0431d9a734c-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.033705 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.109484 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-vwvcl"] Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.109874 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" podUID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerName="dnsmasq-dns" containerID="cri-o://ff6b5ed5ea80ff535de8c8d25f1a5a38d242dbd9d977c071b42adee5d842f27f" gracePeriod=10 Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.129730 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" podUID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.340616 4910 generic.go:334] "Generic (PLEG): container finished" podID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerID="ff6b5ed5ea80ff535de8c8d25f1a5a38d242dbd9d977c071b42adee5d842f27f" exitCode=0 Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.340689 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" event={"ID":"deaa9a59-2559-43c0-83fa-2380549d8c88","Type":"ContainerDied","Data":"ff6b5ed5ea80ff535de8c8d25f1a5a38d242dbd9d977c071b42adee5d842f27f"} Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.344976 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-lhjdg"] Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.354130 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-lhjdg"] 
Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.439968 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-hzbxg"] Nov 25 21:47:27 crc kubenswrapper[4910]: E1125 21:47:27.440363 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba30a43d-f3c2-4319-8260-a0431d9a734c" containerName="keystone-bootstrap" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.440384 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba30a43d-f3c2-4319-8260-a0431d9a734c" containerName="keystone-bootstrap" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.440568 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba30a43d-f3c2-4319-8260-a0431d9a734c" containerName="keystone-bootstrap" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.441535 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.444793 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.445004 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.449887 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.450020 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.450697 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-v6h2m" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.458846 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hzbxg"] Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.517977 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-combined-ca-bundle\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.518026 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-scripts\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.518051 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-fernet-keys\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.518124 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-credential-keys\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 
21:47:27.518178 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9t5s8\" (UniqueName: \"kubernetes.io/projected/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-kube-api-access-9t5s8\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.518215 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-config-data\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.619548 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-credential-keys\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.619657 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9t5s8\" (UniqueName: \"kubernetes.io/projected/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-kube-api-access-9t5s8\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.619698 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-config-data\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.619753 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-combined-ca-bundle\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.619774 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-scripts\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.619793 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-fernet-keys\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.626194 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-scripts\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.626646 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: 
\"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-fernet-keys\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.626738 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-combined-ca-bundle\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.627958 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-config-data\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.636684 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-credential-keys\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.645723 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9t5s8\" (UniqueName: \"kubernetes.io/projected/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-kube-api-access-9t5s8\") pod \"keystone-bootstrap-hzbxg\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:27 crc kubenswrapper[4910]: I1125 21:47:27.819200 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:29 crc kubenswrapper[4910]: I1125 21:47:29.216536 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba30a43d-f3c2-4319-8260-a0431d9a734c" path="/var/lib/kubelet/pods/ba30a43d-f3c2-4319-8260-a0431d9a734c/volumes" Nov 25 21:47:32 crc kubenswrapper[4910]: I1125 21:47:32.129828 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" podUID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Nov 25 21:47:37 crc kubenswrapper[4910]: I1125 21:47:37.129481 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" podUID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Nov 25 21:47:37 crc kubenswrapper[4910]: I1125 21:47:37.130188 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:38 crc kubenswrapper[4910]: I1125 21:47:38.459371 4910 generic.go:334] "Generic (PLEG): container finished" podID="19f21ae5-3e49-410e-a481-00e837d94c6c" containerID="7714ec33692b75cb39718e60df64e30bbed48f36216d945b74baf39db7e0d990" exitCode=0 Nov 25 21:47:38 crc kubenswrapper[4910]: I1125 21:47:38.459502 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-dvs77" event={"ID":"19f21ae5-3e49-410e-a481-00e837d94c6c","Type":"ContainerDied","Data":"7714ec33692b75cb39718e60df64e30bbed48f36216d945b74baf39db7e0d990"} Nov 25 21:47:38 crc kubenswrapper[4910]: I1125 21:47:38.973047 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.062894 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-scripts\") pod \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.062976 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-httpd-run\") pod \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.063056 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnbd2\" (UniqueName: \"kubernetes.io/projected/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-kube-api-access-hnbd2\") pod \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.063108 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.063176 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-logs\") pod \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.063225 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-combined-ca-bundle\") pod \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.063293 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-public-tls-certs\") pod \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.063319 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-config-data\") pod \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\" (UID: \"57d0c3a5-eb4d-44f5-a569-3ff2a572747d\") " Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.064485 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "57d0c3a5-eb4d-44f5-a569-3ff2a572747d" (UID: "57d0c3a5-eb4d-44f5-a569-3ff2a572747d"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.064610 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-logs" (OuterVolumeSpecName: "logs") pod "57d0c3a5-eb4d-44f5-a569-3ff2a572747d" (UID: "57d0c3a5-eb4d-44f5-a569-3ff2a572747d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.070456 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-scripts" (OuterVolumeSpecName: "scripts") pod "57d0c3a5-eb4d-44f5-a569-3ff2a572747d" (UID: "57d0c3a5-eb4d-44f5-a569-3ff2a572747d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.071508 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-kube-api-access-hnbd2" (OuterVolumeSpecName: "kube-api-access-hnbd2") pod "57d0c3a5-eb4d-44f5-a569-3ff2a572747d" (UID: "57d0c3a5-eb4d-44f5-a569-3ff2a572747d"). InnerVolumeSpecName "kube-api-access-hnbd2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.073370 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "57d0c3a5-eb4d-44f5-a569-3ff2a572747d" (UID: "57d0c3a5-eb4d-44f5-a569-3ff2a572747d"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.095059 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57d0c3a5-eb4d-44f5-a569-3ff2a572747d" (UID: "57d0c3a5-eb4d-44f5-a569-3ff2a572747d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.115397 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-config-data" (OuterVolumeSpecName: "config-data") pod "57d0c3a5-eb4d-44f5-a569-3ff2a572747d" (UID: "57d0c3a5-eb4d-44f5-a569-3ff2a572747d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.134594 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "57d0c3a5-eb4d-44f5-a569-3ff2a572747d" (UID: "57d0c3a5-eb4d-44f5-a569-3ff2a572747d"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.165498 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.165537 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.165549 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnbd2\" (UniqueName: \"kubernetes.io/projected/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-kube-api-access-hnbd2\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.165584 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.165594 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.165602 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.165611 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.165622 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57d0c3a5-eb4d-44f5-a569-3ff2a572747d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.185943 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.267690 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.471032 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.471542 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"57d0c3a5-eb4d-44f5-a569-3ff2a572747d","Type":"ContainerDied","Data":"e7ddcc585b3737d450fc9a081113cdcd62be32df7bf7341e83ef2d7f822c122d"} Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.471585 4910 scope.go:117] "RemoveContainer" containerID="4d74ba63c358e0edac56b5cb81d5b8a0ce66f53bc25cb0011ef862bcc562d0fc" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.506281 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.523219 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.538151 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 21:47:39 crc kubenswrapper[4910]: E1125 21:47:39.538824 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57d0c3a5-eb4d-44f5-a569-3ff2a572747d" containerName="glance-httpd" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.538850 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="57d0c3a5-eb4d-44f5-a569-3ff2a572747d" containerName="glance-httpd" Nov 25 21:47:39 crc kubenswrapper[4910]: E1125 21:47:39.538872 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57d0c3a5-eb4d-44f5-a569-3ff2a572747d" containerName="glance-log" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.538882 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="57d0c3a5-eb4d-44f5-a569-3ff2a572747d" containerName="glance-log" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.539152 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="57d0c3a5-eb4d-44f5-a569-3ff2a572747d" containerName="glance-log" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.539188 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="57d0c3a5-eb4d-44f5-a569-3ff2a572747d" containerName="glance-httpd" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.540625 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.543480 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.549213 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.560703 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.674278 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.674325 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.674347 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-config-data\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.674575 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.674706 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89h7x\" (UniqueName: \"kubernetes.io/projected/d002bfa2-970b-44fc-b839-8e114323162e-kube-api-access-89h7x\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.674779 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.674801 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-logs\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.674830 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-scripts\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.776615 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.776697 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89h7x\" (UniqueName: \"kubernetes.io/projected/d002bfa2-970b-44fc-b839-8e114323162e-kube-api-access-89h7x\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.776729 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.776748 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-logs\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.776766 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-scripts\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.776823 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.776839 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.776864 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-config-data\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.776869 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.778010 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.778384 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-logs\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.797889 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.798280 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-config-data\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.802170 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-scripts\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.802263 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.822121 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89h7x\" (UniqueName: \"kubernetes.io/projected/d002bfa2-970b-44fc-b839-8e114323162e-kube-api-access-89h7x\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.827851 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") " pod="openstack/glance-default-external-api-0" Nov 25 21:47:39 crc kubenswrapper[4910]: I1125 21:47:39.865867 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 25 21:47:41 crc kubenswrapper[4910]: I1125 21:47:41.216021 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57d0c3a5-eb4d-44f5-a569-3ff2a572747d" path="/var/lib/kubelet/pods/57d0c3a5-eb4d-44f5-a569-3ff2a572747d/volumes" Nov 25 21:47:41 crc kubenswrapper[4910]: E1125 21:47:41.313625 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Nov 25 21:47:41 crc kubenswrapper[4910]: E1125 21:47:41.313823 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n6bh584h548h8bh658h5ch59ch66bhbch694h68dh9hc9h696h65ch84h5ddh78h56fh89hd5h66h54bh577h5b5h57dh595h9h685h6fh5fch658q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bzqdt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(65f64c1b-8090-4f51-9a93-46a36ff28baa): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:47:41 crc kubenswrapper[4910]: E1125 21:47:41.335161 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 25 21:47:41 crc 
kubenswrapper[4910]: E1125 21:47:41.335368 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F /var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nb8h686h5f8h58fh675h6hb9hd5h696h556h699h596h5b8hbdh54bh554h67fh576hcdh4h676h55bh694h5ffhcbhcbh5h57bhfbh5ddh8bh5f7q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-j2mmg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-847c69868c-h5mpg_openstack(472df0ce-39e1-4ccb-b92b-1ff7f7d88152): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:47:41 crc kubenswrapper[4910]: E1125 21:47:41.337990 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-847c69868c-h5mpg" podUID="472df0ce-39e1-4ccb-b92b-1ff7f7d88152" Nov 25 21:47:41 crc kubenswrapper[4910]: E1125 21:47:41.342634 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Nov 25 21:47:41 crc kubenswrapper[4910]: E1125 21:47:41.342941 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n55ch5bdhb7h644h586h58dh687h87h5cch656hbch578h5f5h677h656hf6h5c8hb7h74h65ch58h8h5bdh58ch685h55fh575h659h648h5bh659h66q,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-74rx7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-589cccf487-rmm6g_openstack(44202bfc-d64a-46ce-a8a7-d49c68691337): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:47:41 crc kubenswrapper[4910]: E1125 21:47:41.346449 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-589cccf487-rmm6g" podUID="44202bfc-d64a-46ce-a8a7-d49c68691337" Nov 25 21:47:42 crc kubenswrapper[4910]: I1125 21:47:42.129844 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" podUID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.126:5353: connect: connection refused" Nov 25 21:47:43 crc kubenswrapper[4910]: E1125 21:47:43.236322 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Nov 25 21:47:43 crc kubenswrapper[4910]: E1125 21:47:43.236567 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-k4mpl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-djprh_openstack(909bc667-1a51-44ef-b676-dabab2050b4e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:47:43 crc kubenswrapper[4910]: E1125 21:47:43.237776 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-djprh" podUID="909bc667-1a51-44ef-b676-dabab2050b4e" Nov 25 21:47:43 crc kubenswrapper[4910]: E1125 21:47:43.521801 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-djprh" podUID="909bc667-1a51-44ef-b676-dabab2050b4e" Nov 25 21:47:45 crc kubenswrapper[4910]: E1125 21:47:45.195181 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Nov 25 21:47:45 crc kubenswrapper[4910]: E1125 21:47:45.195834 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ntfr4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-xx5nj_openstack(d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:47:45 crc kubenswrapper[4910]: E1125 21:47:45.198550 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-xx5nj" podUID="d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.249499 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.400294 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mzmb9\" (UniqueName: \"kubernetes.io/projected/19f21ae5-3e49-410e-a481-00e837d94c6c-kube-api-access-mzmb9\") pod \"19f21ae5-3e49-410e-a481-00e837d94c6c\" (UID: \"19f21ae5-3e49-410e-a481-00e837d94c6c\") " Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.400440 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-combined-ca-bundle\") pod \"19f21ae5-3e49-410e-a481-00e837d94c6c\" (UID: \"19f21ae5-3e49-410e-a481-00e837d94c6c\") " Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.401047 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-config\") pod \"19f21ae5-3e49-410e-a481-00e837d94c6c\" (UID: \"19f21ae5-3e49-410e-a481-00e837d94c6c\") " Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.406370 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19f21ae5-3e49-410e-a481-00e837d94c6c-kube-api-access-mzmb9" (OuterVolumeSpecName: "kube-api-access-mzmb9") pod "19f21ae5-3e49-410e-a481-00e837d94c6c" (UID: "19f21ae5-3e49-410e-a481-00e837d94c6c"). InnerVolumeSpecName "kube-api-access-mzmb9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.431065 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-config" (OuterVolumeSpecName: "config") pod "19f21ae5-3e49-410e-a481-00e837d94c6c" (UID: "19f21ae5-3e49-410e-a481-00e837d94c6c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.440754 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19f21ae5-3e49-410e-a481-00e837d94c6c" (UID: "19f21ae5-3e49-410e-a481-00e837d94c6c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.503388 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mzmb9\" (UniqueName: \"kubernetes.io/projected/19f21ae5-3e49-410e-a481-00e837d94c6c-kube-api-access-mzmb9\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.503450 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.503461 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/19f21ae5-3e49-410e-a481-00e837d94c6c-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.537319 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-dvs77" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.538025 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-dvs77" event={"ID":"19f21ae5-3e49-410e-a481-00e837d94c6c","Type":"ContainerDied","Data":"c8ebe66188495cc72805dadca951796879215f710136a4738f0f45b6287337a6"} Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.538076 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8ebe66188495cc72805dadca951796879215f710136a4738f0f45b6287337a6" Nov 25 21:47:45 crc kubenswrapper[4910]: E1125 21:47:45.539300 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-xx5nj" podUID="d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" Nov 25 21:47:45 crc kubenswrapper[4910]: E1125 21:47:45.719838 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Nov 25 21:47:45 crc kubenswrapper[4910]: E1125 21:47:45.720453 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rzn45,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-t82b5_openstack(7e650049-c8bd-4a60-a1f7-1b022752ff7a): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 21:47:45 crc kubenswrapper[4910]: E1125 21:47:45.721596 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-t82b5" 
podUID="7e650049-c8bd-4a60-a1f7-1b022752ff7a" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.762427 4910 scope.go:117] "RemoveContainer" containerID="4c6ad8d9c0a968d1bf4ee65a4b4c7268ee61ae53d1b154dae0e96e12ce12bdfb" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.843756 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.861424 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.878302 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.901116 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.917031 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-config-data\") pod \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.917132 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2mmg\" (UniqueName: \"kubernetes.io/projected/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-kube-api-access-j2mmg\") pod \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.917235 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-scripts\") pod \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.917462 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-horizon-secret-key\") pod \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.917510 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-logs\") pod \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\" (UID: \"472df0ce-39e1-4ccb-b92b-1ff7f7d88152\") " Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.918878 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-logs" (OuterVolumeSpecName: "logs") pod "472df0ce-39e1-4ccb-b92b-1ff7f7d88152" (UID: "472df0ce-39e1-4ccb-b92b-1ff7f7d88152"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.920235 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-config-data" (OuterVolumeSpecName: "config-data") pod "472df0ce-39e1-4ccb-b92b-1ff7f7d88152" (UID: "472df0ce-39e1-4ccb-b92b-1ff7f7d88152"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.924233 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-scripts" (OuterVolumeSpecName: "scripts") pod "472df0ce-39e1-4ccb-b92b-1ff7f7d88152" (UID: "472df0ce-39e1-4ccb-b92b-1ff7f7d88152"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.931342 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "472df0ce-39e1-4ccb-b92b-1ff7f7d88152" (UID: "472df0ce-39e1-4ccb-b92b-1ff7f7d88152"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:45 crc kubenswrapper[4910]: I1125 21:47:45.948724 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-kube-api-access-j2mmg" (OuterVolumeSpecName: "kube-api-access-j2mmg") pod "472df0ce-39e1-4ccb-b92b-1ff7f7d88152" (UID: "472df0ce-39e1-4ccb-b92b-1ff7f7d88152"). InnerVolumeSpecName "kube-api-access-j2mmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019456 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-httpd-run\") pod \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019531 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-config\") pod \"deaa9a59-2559-43c0-83fa-2380549d8c88\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019576 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-config-data\") pod \"44202bfc-d64a-46ce-a8a7-d49c68691337\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019617 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-48fff\" (UniqueName: \"kubernetes.io/projected/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-kube-api-access-48fff\") pod \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019658 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-svc\") pod \"deaa9a59-2559-43c0-83fa-2380549d8c88\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019695 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-scripts\") pod \"44202bfc-d64a-46ce-a8a7-d49c68691337\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019746 4910 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-logs\") pod \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019773 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-config-data\") pod \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019825 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-swift-storage-0\") pod \"deaa9a59-2559-43c0-83fa-2380549d8c88\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019855 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44202bfc-d64a-46ce-a8a7-d49c68691337-logs\") pod \"44202bfc-d64a-46ce-a8a7-d49c68691337\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019876 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019907 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-sb\") pod \"deaa9a59-2559-43c0-83fa-2380549d8c88\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019941 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-74rx7\" (UniqueName: \"kubernetes.io/projected/44202bfc-d64a-46ce-a8a7-d49c68691337-kube-api-access-74rx7\") pod \"44202bfc-d64a-46ce-a8a7-d49c68691337\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.019975 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-nb\") pod \"deaa9a59-2559-43c0-83fa-2380549d8c88\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.020049 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/44202bfc-d64a-46ce-a8a7-d49c68691337-horizon-secret-key\") pod \"44202bfc-d64a-46ce-a8a7-d49c68691337\" (UID: \"44202bfc-d64a-46ce-a8a7-d49c68691337\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.020101 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-scripts\") pod \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.020112 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-logs" (OuterVolumeSpecName: "logs") pod "fda9cfce-4f37-4d0e-b8dc-c22089efdf63" (UID: "fda9cfce-4f37-4d0e-b8dc-c22089efdf63"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.020154 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "fda9cfce-4f37-4d0e-b8dc-c22089efdf63" (UID: "fda9cfce-4f37-4d0e-b8dc-c22089efdf63"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.020129 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-internal-tls-certs\") pod \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.020260 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-scripts" (OuterVolumeSpecName: "scripts") pod "44202bfc-d64a-46ce-a8a7-d49c68691337" (UID: "44202bfc-d64a-46ce-a8a7-d49c68691337"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.020290 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-combined-ca-bundle\") pod \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\" (UID: \"fda9cfce-4f37-4d0e-b8dc-c22089efdf63\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.020358 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7hc7\" (UniqueName: \"kubernetes.io/projected/deaa9a59-2559-43c0-83fa-2380549d8c88-kube-api-access-b7hc7\") pod \"deaa9a59-2559-43c0-83fa-2380549d8c88\" (UID: \"deaa9a59-2559-43c0-83fa-2380549d8c88\") " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.020967 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/44202bfc-d64a-46ce-a8a7-d49c68691337-logs" (OuterVolumeSpecName: "logs") pod "44202bfc-d64a-46ce-a8a7-d49c68691337" (UID: "44202bfc-d64a-46ce-a8a7-d49c68691337"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.021132 4910 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.021156 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.021192 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.021204 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.021217 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2mmg\" (UniqueName: \"kubernetes.io/projected/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-kube-api-access-j2mmg\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.021230 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.021280 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.021296 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/472df0ce-39e1-4ccb-b92b-1ff7f7d88152-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.021308 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/44202bfc-d64a-46ce-a8a7-d49c68691337-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.021411 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-config-data" (OuterVolumeSpecName: "config-data") pod "44202bfc-d64a-46ce-a8a7-d49c68691337" (UID: "44202bfc-d64a-46ce-a8a7-d49c68691337"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.023576 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "fda9cfce-4f37-4d0e-b8dc-c22089efdf63" (UID: "fda9cfce-4f37-4d0e-b8dc-c22089efdf63"). InnerVolumeSpecName "local-storage06-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.025652 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-scripts" (OuterVolumeSpecName: "scripts") pod "fda9cfce-4f37-4d0e-b8dc-c22089efdf63" (UID: "fda9cfce-4f37-4d0e-b8dc-c22089efdf63"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.025803 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-kube-api-access-48fff" (OuterVolumeSpecName: "kube-api-access-48fff") pod "fda9cfce-4f37-4d0e-b8dc-c22089efdf63" (UID: "fda9cfce-4f37-4d0e-b8dc-c22089efdf63"). InnerVolumeSpecName "kube-api-access-48fff". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.026855 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44202bfc-d64a-46ce-a8a7-d49c68691337-kube-api-access-74rx7" (OuterVolumeSpecName: "kube-api-access-74rx7") pod "44202bfc-d64a-46ce-a8a7-d49c68691337" (UID: "44202bfc-d64a-46ce-a8a7-d49c68691337"). InnerVolumeSpecName "kube-api-access-74rx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.031792 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/44202bfc-d64a-46ce-a8a7-d49c68691337-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "44202bfc-d64a-46ce-a8a7-d49c68691337" (UID: "44202bfc-d64a-46ce-a8a7-d49c68691337"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.036171 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/deaa9a59-2559-43c0-83fa-2380549d8c88-kube-api-access-b7hc7" (OuterVolumeSpecName: "kube-api-access-b7hc7") pod "deaa9a59-2559-43c0-83fa-2380549d8c88" (UID: "deaa9a59-2559-43c0-83fa-2380549d8c88"). InnerVolumeSpecName "kube-api-access-b7hc7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.053864 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fda9cfce-4f37-4d0e-b8dc-c22089efdf63" (UID: "fda9cfce-4f37-4d0e-b8dc-c22089efdf63"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.065962 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "deaa9a59-2559-43c0-83fa-2380549d8c88" (UID: "deaa9a59-2559-43c0-83fa-2380549d8c88"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.065983 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-config-data" (OuterVolumeSpecName: "config-data") pod "fda9cfce-4f37-4d0e-b8dc-c22089efdf63" (UID: "fda9cfce-4f37-4d0e-b8dc-c22089efdf63"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.068792 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "deaa9a59-2559-43c0-83fa-2380549d8c88" (UID: "deaa9a59-2559-43c0-83fa-2380549d8c88"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.080594 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-config" (OuterVolumeSpecName: "config") pod "deaa9a59-2559-43c0-83fa-2380549d8c88" (UID: "deaa9a59-2559-43c0-83fa-2380549d8c88"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.080653 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "deaa9a59-2559-43c0-83fa-2380549d8c88" (UID: "deaa9a59-2559-43c0-83fa-2380549d8c88"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.083622 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "fda9cfce-4f37-4d0e-b8dc-c22089efdf63" (UID: "fda9cfce-4f37-4d0e-b8dc-c22089efdf63"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.089575 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "deaa9a59-2559-43c0-83fa-2380549d8c88" (UID: "deaa9a59-2559-43c0-83fa-2380549d8c88"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123156 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123190 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/44202bfc-d64a-46ce-a8a7-d49c68691337-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123202 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-48fff\" (UniqueName: \"kubernetes.io/projected/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-kube-api-access-48fff\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123212 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123220 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123229 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123274 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123283 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123293 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-74rx7\" (UniqueName: \"kubernetes.io/projected/44202bfc-d64a-46ce-a8a7-d49c68691337-kube-api-access-74rx7\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123301 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/deaa9a59-2559-43c0-83fa-2380549d8c88-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123309 4910 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/44202bfc-d64a-46ce-a8a7-d49c68691337-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123317 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123324 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: 
I1125 21:47:46.123332 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fda9cfce-4f37-4d0e-b8dc-c22089efdf63-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.123339 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7hc7\" (UniqueName: \"kubernetes.io/projected/deaa9a59-2559-43c0-83fa-2380549d8c88-kube-api-access-b7hc7\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.144260 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.224861 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.249656 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-657976db8d-swkbt"] Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.476171 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-52tbh"] Nov 25 21:47:46 crc kubenswrapper[4910]: E1125 21:47:46.476970 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19f21ae5-3e49-410e-a481-00e837d94c6c" containerName="neutron-db-sync" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.476990 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="19f21ae5-3e49-410e-a481-00e837d94c6c" containerName="neutron-db-sync" Nov 25 21:47:46 crc kubenswrapper[4910]: E1125 21:47:46.477016 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerName="init" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.477025 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerName="init" Nov 25 21:47:46 crc kubenswrapper[4910]: E1125 21:47:46.477036 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fda9cfce-4f37-4d0e-b8dc-c22089efdf63" containerName="glance-httpd" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.477044 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fda9cfce-4f37-4d0e-b8dc-c22089efdf63" containerName="glance-httpd" Nov 25 21:47:46 crc kubenswrapper[4910]: E1125 21:47:46.477060 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerName="dnsmasq-dns" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.477067 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerName="dnsmasq-dns" Nov 25 21:47:46 crc kubenswrapper[4910]: E1125 21:47:46.477086 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fda9cfce-4f37-4d0e-b8dc-c22089efdf63" containerName="glance-log" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.477094 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fda9cfce-4f37-4d0e-b8dc-c22089efdf63" containerName="glance-log" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.477312 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="fda9cfce-4f37-4d0e-b8dc-c22089efdf63" containerName="glance-log" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.477328 4910 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="19f21ae5-3e49-410e-a481-00e837d94c6c" containerName="neutron-db-sync" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.477346 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="deaa9a59-2559-43c0-83fa-2380549d8c88" containerName="dnsmasq-dns" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.477360 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="fda9cfce-4f37-4d0e-b8dc-c22089efdf63" containerName="glance-httpd" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.493663 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.520054 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-52tbh"] Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.624430 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-589cccf487-rmm6g" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.629410 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-589cccf487-rmm6g" event={"ID":"44202bfc-d64a-46ce-a8a7-d49c68691337","Type":"ContainerDied","Data":"ee0ea256ea04c33b2504a7beaaa56bb380fcc53692d90f662a7c096eeb0d0280"} Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.639842 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.639916 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bv96x\" (UniqueName: \"kubernetes.io/projected/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-kube-api-access-bv96x\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.639952 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.640138 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-config\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.640185 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.640211 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.642918 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-657976db8d-swkbt" event={"ID":"7941e190-b648-4b11-946b-dddaa1bc98d9","Type":"ContainerStarted","Data":"53a1984692e45e2bb5bfa57e4f05d77704c5e9eb134e5c9297ce405d4badfb02"} Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.642977 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-657976db8d-swkbt" event={"ID":"7941e190-b648-4b11-946b-dddaa1bc98d9","Type":"ContainerStarted","Data":"1e786111010c1f6fa42bab500f80a172cca6d4374fcd9561655e9373716336a5"} Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.660597 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fda9cfce-4f37-4d0e-b8dc-c22089efdf63","Type":"ContainerDied","Data":"631da099618c79e34901ed3a7a1dc202fb56603eb404dd7cb7b4ac060b28f562"} Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.660662 4910 scope.go:117] "RemoveContainer" containerID="9dc95c3a36dc1eadff6576f1d0a7aeded840fd7720b1004f579afc06879ad43c" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.660782 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.716996 4910 scope.go:117] "RemoveContainer" containerID="29a1abeaa06439b42eca14b24d892b39296234c9ae60f90fa83bf6b986afe872" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.726570 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6b94985954-h6p9h"] Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.732343 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.736175 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-895cf5cf-vwvcl" event={"ID":"deaa9a59-2559-43c0-83fa-2380549d8c88","Type":"ContainerDied","Data":"ea94a411bceae6fa0faab97bcc8fc46ba2121a93ea4f99ff38489ef8d2ba4505"} Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.736306 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.739486 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.740504 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-847c69868c-h5mpg" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.741316 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-847c69868c-h5mpg" event={"ID":"472df0ce-39e1-4ccb-b92b-1ff7f7d88152","Type":"ContainerDied","Data":"1353d090030bbf160b6536120f54efb6fcf69d88f3893bb21a45c97c2f3f2ad0"} Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.742354 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-config\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.742392 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.742415 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.742444 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.742467 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bv96x\" (UniqueName: \"kubernetes.io/projected/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-kube-api-access-bv96x\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.742482 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.742872 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-2s7pb" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.743168 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.743865 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-svc\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.744497 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-sb\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.744872 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.745400 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-config\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.745814 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-nb\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.780873 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bv96x\" (UniqueName: \"kubernetes.io/projected/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-kube-api-access-bv96x\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.793783 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-swift-storage-0\") pod \"dnsmasq-dns-5ccc5c4795-52tbh\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") " pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: E1125 21:47:46.795773 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-t82b5" podUID="7e650049-c8bd-4a60-a1f7-1b022752ff7a" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.796491 4910 scope.go:117] "RemoveContainer" containerID="ff6b5ed5ea80ff535de8c8d25f1a5a38d242dbd9d977c071b42adee5d842f27f" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.822867 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.848216 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-httpd-config\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.848369 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54sjq\" (UniqueName: \"kubernetes.io/projected/ce75c296-74f5-4f94-bcdf-58bb1d44f445-kube-api-access-54sjq\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.848421 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-ovndb-tls-certs\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.848535 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-config\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.848633 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-combined-ca-bundle\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.855915 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-58f8d7cc56-csk7l"] Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.888165 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6b94985954-h6p9h"] Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.896937 4910 scope.go:117] "RemoveContainer" containerID="f112bd96412c513d48213d2dabb77cf3dcaf1bd0d017092c6975663bbeb626b5" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.929209 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-589cccf487-rmm6g"] Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.944117 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hzbxg"] Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.949698 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-config\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.949763 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-combined-ca-bundle\") pod \"neutron-6b94985954-h6p9h\" (UID: 
\"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.949832 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-httpd-config\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.949874 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54sjq\" (UniqueName: \"kubernetes.io/projected/ce75c296-74f5-4f94-bcdf-58bb1d44f445-kube-api-access-54sjq\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.949891 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-ovndb-tls-certs\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.959854 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.961029 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-589cccf487-rmm6g"] Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.964039 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-config\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.971957 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-ovndb-tls-certs\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.975786 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-httpd-config\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.976576 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54sjq\" (UniqueName: \"kubernetes.io/projected/ce75c296-74f5-4f94-bcdf-58bb1d44f445-kube-api-access-54sjq\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:46 crc kubenswrapper[4910]: I1125 21:47:46.981526 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-combined-ca-bundle\") pod \"neutron-6b94985954-h6p9h\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.013392 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/glance-default-internal-api-0"] Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.024328 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.031073 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.042683 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-vwvcl"] Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.051510 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-895cf5cf-vwvcl"] Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.060324 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.062474 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.065026 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.065162 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.067455 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.093493 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-847c69868c-h5mpg"] Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.096713 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.101051 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-847c69868c-h5mpg"] Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.153446 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.153555 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5vtf8\" (UniqueName: \"kubernetes.io/projected/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-kube-api-access-5vtf8\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.153584 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.153608 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.153642 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-logs\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.153697 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.153715 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.153744 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.217661 4910 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="44202bfc-d64a-46ce-a8a7-d49c68691337" path="/var/lib/kubelet/pods/44202bfc-d64a-46ce-a8a7-d49c68691337/volumes" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.219197 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="472df0ce-39e1-4ccb-b92b-1ff7f7d88152" path="/var/lib/kubelet/pods/472df0ce-39e1-4ccb-b92b-1ff7f7d88152/volumes" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.219803 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="deaa9a59-2559-43c0-83fa-2380549d8c88" path="/var/lib/kubelet/pods/deaa9a59-2559-43c0-83fa-2380549d8c88/volumes" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.221185 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda9cfce-4f37-4d0e-b8dc-c22089efdf63" path="/var/lib/kubelet/pods/fda9cfce-4f37-4d0e-b8dc-c22089efdf63/volumes" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.255302 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.255375 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.255423 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5vtf8\" (UniqueName: \"kubernetes.io/projected/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-kube-api-access-5vtf8\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.255446 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.255470 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.255516 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-logs\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.255568 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " 
pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.255596 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.255928 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.261891 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.263133 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-logs\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.267971 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.288977 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.306963 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.307625 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5vtf8\" (UniqueName: \"kubernetes.io/projected/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-kube-api-access-5vtf8\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.319839 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.321061 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.408311 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.517730 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-52tbh"] Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.614132 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6b94985954-h6p9h"] Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.787827 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65f64c1b-8090-4f51-9a93-46a36ff28baa","Type":"ContainerStarted","Data":"d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.790574 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hzbxg" event={"ID":"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4","Type":"ContainerStarted","Data":"98b85101cf133b251b3c4ea10424c224c5318fca5d278e330bcaa598ce042f82"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.790639 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hzbxg" event={"ID":"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4","Type":"ContainerStarted","Data":"c538719ef8f220fb69f93d1f0ed065e2bad0a67fd827df7f93552883803ea642"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.803459 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79b94b5d95-k97q6" event={"ID":"a49af42e-3a80-4f2a-9b4b-f43946a32c49","Type":"ContainerStarted","Data":"ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.803526 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79b94b5d95-k97q6" event={"ID":"a49af42e-3a80-4f2a-9b4b-f43946a32c49","Type":"ContainerStarted","Data":"b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.803702 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-79b94b5d95-k97q6" podUID="a49af42e-3a80-4f2a-9b4b-f43946a32c49" containerName="horizon-log" containerID="cri-o://b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940" gracePeriod=30 Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.804534 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-79b94b5d95-k97q6" podUID="a49af42e-3a80-4f2a-9b4b-f43946a32c49" containerName="horizon" containerID="cri-o://ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c" gracePeriod=30 Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.826569 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-hzbxg" podStartSLOduration=20.826548601 podStartE2EDuration="20.826548601s" podCreationTimestamp="2025-11-25 21:47:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:47.812515208 +0000 UTC m=+1023.274991540" 
watchObservedRunningTime="2025-11-25 21:47:47.826548601 +0000 UTC m=+1023.289024923" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.827014 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" event={"ID":"9ad0d21f-7e9e-45a9-9c13-ff948f290e77","Type":"ContainerStarted","Data":"2b102cf60990d5bba73bf536f8fbbe4a47b1a3c8f3c4adbf5493948c74d12f0a"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.841096 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d002bfa2-970b-44fc-b839-8e114323162e","Type":"ContainerStarted","Data":"2d236b04befb235c2913dc07358dcaac8eefe34367627de30cb431f5aefa1e87"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.842842 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b94985954-h6p9h" event={"ID":"ce75c296-74f5-4f94-bcdf-58bb1d44f445","Type":"ContainerStarted","Data":"7dffe93d7de1a059353097a174f448e5d599030aa297b44403aad8b9fa8373d1"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.853429 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-657976db8d-swkbt" event={"ID":"7941e190-b648-4b11-946b-dddaa1bc98d9","Type":"ContainerStarted","Data":"627349337ae740ebd4dfd16a8ca8efe188270bc739f028faca622d9d48f95a37"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.880514 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58f8d7cc56-csk7l" event={"ID":"78dc494b-f987-443a-a350-1988639b6fee","Type":"ContainerStarted","Data":"3a6d55cf981774e9e32bdc5ee3a9fbb53ace068261c91c2d651a817d4ca4dc1f"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.880571 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58f8d7cc56-csk7l" event={"ID":"78dc494b-f987-443a-a350-1988639b6fee","Type":"ContainerStarted","Data":"3d1388e3f9936a5e6838117a86dbadfac66d243bb6179a027f0f0a595cdb55ae"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.880581 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58f8d7cc56-csk7l" event={"ID":"78dc494b-f987-443a-a350-1988639b6fee","Type":"ContainerStarted","Data":"3abf372eb70ee21335b052fff6640229310346d6931e7d3ce88098687041568e"} Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.935718 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-79b94b5d95-k97q6" podStartSLOduration=4.317000878 podStartE2EDuration="31.935680346s" podCreationTimestamp="2025-11-25 21:47:16 +0000 UTC" firstStartedPulling="2025-11-25 21:47:18.103662063 +0000 UTC m=+993.566138385" lastFinishedPulling="2025-11-25 21:47:45.722341531 +0000 UTC m=+1021.184817853" observedRunningTime="2025-11-25 21:47:47.846788668 +0000 UTC m=+1023.309265000" watchObservedRunningTime="2025-11-25 21:47:47.935680346 +0000 UTC m=+1023.398156668" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.946807 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-657976db8d-swkbt" podStartSLOduration=22.946786401 podStartE2EDuration="22.946786401s" podCreationTimestamp="2025-11-25 21:47:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:47.891878014 +0000 UTC m=+1023.354354336" watchObservedRunningTime="2025-11-25 21:47:47.946786401 +0000 UTC m=+1023.409262723" Nov 25 21:47:47 crc kubenswrapper[4910]: I1125 21:47:47.970770 4910 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-58f8d7cc56-csk7l" podStartSLOduration=22.970733416 podStartE2EDuration="22.970733416s" podCreationTimestamp="2025-11-25 21:47:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:47.918812369 +0000 UTC m=+1023.381288691" watchObservedRunningTime="2025-11-25 21:47:47.970733416 +0000 UTC m=+1023.433209748" Nov 25 21:47:48 crc kubenswrapper[4910]: W1125 21:47:48.038369 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf2e4643d_59dd_4252_8b35_c6f3003aa3d0.slice/crio-2658ec53139f23020396273c01bebe6484b769dc371e4d242598e53112f1d28b WatchSource:0}: Error finding container 2658ec53139f23020396273c01bebe6484b769dc371e4d242598e53112f1d28b: Status 404 returned error can't find the container with id 2658ec53139f23020396273c01bebe6484b769dc371e4d242598e53112f1d28b Nov 25 21:47:48 crc kubenswrapper[4910]: I1125 21:47:48.039760 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:47:48 crc kubenswrapper[4910]: I1125 21:47:48.909842 4910 generic.go:334] "Generic (PLEG): container finished" podID="9ad0d21f-7e9e-45a9-9c13-ff948f290e77" containerID="fef354f192599f289de05b42efa804f3df74fab9c02e4f5944dee1a6061c6448" exitCode=0 Nov 25 21:47:48 crc kubenswrapper[4910]: I1125 21:47:48.910174 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" event={"ID":"9ad0d21f-7e9e-45a9-9c13-ff948f290e77","Type":"ContainerDied","Data":"fef354f192599f289de05b42efa804f3df74fab9c02e4f5944dee1a6061c6448"} Nov 25 21:47:48 crc kubenswrapper[4910]: I1125 21:47:48.922936 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d002bfa2-970b-44fc-b839-8e114323162e","Type":"ContainerStarted","Data":"46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e"} Nov 25 21:47:48 crc kubenswrapper[4910]: I1125 21:47:48.948419 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b94985954-h6p9h" event={"ID":"ce75c296-74f5-4f94-bcdf-58bb1d44f445","Type":"ContainerStarted","Data":"83a4db89007a69a8fb58c5a4a438dd5ae705dd0757f3686acaede6daf5df836c"} Nov 25 21:47:48 crc kubenswrapper[4910]: I1125 21:47:48.948469 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b94985954-h6p9h" event={"ID":"ce75c296-74f5-4f94-bcdf-58bb1d44f445","Type":"ContainerStarted","Data":"2831751b92e100fb850ae3355d63ca71ab3c40b2f6e467623d701b84f49da36e"} Nov 25 21:47:48 crc kubenswrapper[4910]: I1125 21:47:48.949530 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:47:48 crc kubenswrapper[4910]: I1125 21:47:48.971156 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2e4643d-59dd-4252-8b35-c6f3003aa3d0","Type":"ContainerStarted","Data":"2658ec53139f23020396273c01bebe6484b769dc371e4d242598e53112f1d28b"} Nov 25 21:47:48 crc kubenswrapper[4910]: I1125 21:47:48.983469 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6b94985954-h6p9h" podStartSLOduration=2.983443965 podStartE2EDuration="2.983443965s" podCreationTimestamp="2025-11-25 21:47:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:48.974740764 +0000 UTC m=+1024.437217126" watchObservedRunningTime="2025-11-25 21:47:48.983443965 +0000 UTC m=+1024.445920287" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.371682 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-667bcb4bc9-bl288"] Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.373332 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.382071 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.382474 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.384882 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-667bcb4bc9-bl288"] Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.516748 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z57ps\" (UniqueName: \"kubernetes.io/projected/7935f6eb-171e-43a8-9f6c-6bf62769ade6-kube-api-access-z57ps\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.516821 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-public-tls-certs\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.516876 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-config\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.516928 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-internal-tls-certs\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.516946 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-combined-ca-bundle\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.516965 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-ovndb-tls-certs\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.517013 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-httpd-config\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.619365 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-internal-tls-certs\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.619427 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-combined-ca-bundle\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.619466 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-ovndb-tls-certs\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.619525 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-httpd-config\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.619914 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z57ps\" (UniqueName: \"kubernetes.io/projected/7935f6eb-171e-43a8-9f6c-6bf62769ade6-kube-api-access-z57ps\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.619962 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-public-tls-certs\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.620014 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-config\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.627074 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-ovndb-tls-certs\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.627864 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-config\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.628555 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-httpd-config\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.632774 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-internal-tls-certs\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.634999 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-combined-ca-bundle\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.640696 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z57ps\" (UniqueName: \"kubernetes.io/projected/7935f6eb-171e-43a8-9f6c-6bf62769ade6-kube-api-access-z57ps\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.649792 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7935f6eb-171e-43a8-9f6c-6bf62769ade6-public-tls-certs\") pod \"neutron-667bcb4bc9-bl288\" (UID: \"7935f6eb-171e-43a8-9f6c-6bf62769ade6\") " pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:49 crc kubenswrapper[4910]: I1125 21:47:49.699779 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:50 crc kubenswrapper[4910]: I1125 21:47:50.005778 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d002bfa2-970b-44fc-b839-8e114323162e","Type":"ContainerStarted","Data":"57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f"} Nov 25 21:47:50 crc kubenswrapper[4910]: I1125 21:47:50.010477 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2e4643d-59dd-4252-8b35-c6f3003aa3d0","Type":"ContainerStarted","Data":"2ee3018aa2cdd5ada4ecc518f96b93a45dcf57c4e8f7487fb67bd09c483a0232"} Nov 25 21:47:50 crc kubenswrapper[4910]: I1125 21:47:50.901395 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-667bcb4bc9-bl288"] Nov 25 21:47:50 crc kubenswrapper[4910]: W1125 21:47:50.914026 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7935f6eb_171e_43a8_9f6c_6bf62769ade6.slice/crio-8ea8c0f48e222325d78ef0e8cb30538b41dc43599efca6181656284e178b51ab WatchSource:0}: Error finding container 8ea8c0f48e222325d78ef0e8cb30538b41dc43599efca6181656284e178b51ab: Status 404 returned error can't find the container with id 8ea8c0f48e222325d78ef0e8cb30538b41dc43599efca6181656284e178b51ab Nov 25 21:47:51 crc kubenswrapper[4910]: I1125 21:47:51.047003 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2e4643d-59dd-4252-8b35-c6f3003aa3d0","Type":"ContainerStarted","Data":"0c983214d8e6bfa114d31ef3aae2f0cf76f47113e3011d6a00e49e39becd1303"} Nov 25 21:47:51 crc kubenswrapper[4910]: I1125 21:47:51.053167 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" event={"ID":"9ad0d21f-7e9e-45a9-9c13-ff948f290e77","Type":"ContainerStarted","Data":"3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4"} Nov 25 21:47:51 crc kubenswrapper[4910]: I1125 21:47:51.053446 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:51 crc kubenswrapper[4910]: I1125 21:47:51.067389 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-667bcb4bc9-bl288" event={"ID":"7935f6eb-171e-43a8-9f6c-6bf62769ade6","Type":"ContainerStarted","Data":"8ea8c0f48e222325d78ef0e8cb30538b41dc43599efca6181656284e178b51ab"} Nov 25 21:47:51 crc kubenswrapper[4910]: I1125 21:47:51.086670 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.086647949 podStartE2EDuration="5.086647949s" podCreationTimestamp="2025-11-25 21:47:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:51.074777474 +0000 UTC m=+1026.537253796" watchObservedRunningTime="2025-11-25 21:47:51.086647949 +0000 UTC m=+1026.549124261" Nov 25 21:47:51 crc kubenswrapper[4910]: I1125 21:47:51.104401 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" podStartSLOduration=5.104379159 podStartE2EDuration="5.104379159s" podCreationTimestamp="2025-11-25 21:47:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:51.100549598 +0000 UTC 
m=+1026.563025920" watchObservedRunningTime="2025-11-25 21:47:51.104379159 +0000 UTC m=+1026.566855481" Nov 25 21:47:51 crc kubenswrapper[4910]: I1125 21:47:51.140769 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=12.140745024 podStartE2EDuration="12.140745024s" podCreationTimestamp="2025-11-25 21:47:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:51.135582617 +0000 UTC m=+1026.598058939" watchObservedRunningTime="2025-11-25 21:47:51.140745024 +0000 UTC m=+1026.603221346" Nov 25 21:47:52 crc kubenswrapper[4910]: I1125 21:47:52.089048 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-667bcb4bc9-bl288" event={"ID":"7935f6eb-171e-43a8-9f6c-6bf62769ade6","Type":"ContainerStarted","Data":"d36260a8b22a41330446019f2f9e7e2d262ae17764bce3ce9bcd8328b2853586"} Nov 25 21:47:52 crc kubenswrapper[4910]: I1125 21:47:52.089362 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-667bcb4bc9-bl288" event={"ID":"7935f6eb-171e-43a8-9f6c-6bf62769ade6","Type":"ContainerStarted","Data":"772641b5c33158b99460dd40ab7c553d47aebf453ce3115ad612118c9821dacf"} Nov 25 21:47:52 crc kubenswrapper[4910]: I1125 21:47:52.089803 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:47:52 crc kubenswrapper[4910]: I1125 21:47:52.128350 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-667bcb4bc9-bl288" podStartSLOduration=3.128316327 podStartE2EDuration="3.128316327s" podCreationTimestamp="2025-11-25 21:47:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:52.115951669 +0000 UTC m=+1027.578428011" watchObservedRunningTime="2025-11-25 21:47:52.128316327 +0000 UTC m=+1027.590792659" Nov 25 21:47:53 crc kubenswrapper[4910]: I1125 21:47:53.102116 4910 generic.go:334] "Generic (PLEG): container finished" podID="3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4" containerID="98b85101cf133b251b3c4ea10424c224c5318fca5d278e330bcaa598ce042f82" exitCode=0 Nov 25 21:47:53 crc kubenswrapper[4910]: I1125 21:47:53.103986 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hzbxg" event={"ID":"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4","Type":"ContainerDied","Data":"98b85101cf133b251b3c4ea10424c224c5318fca5d278e330bcaa598ce042f82"} Nov 25 21:47:55 crc kubenswrapper[4910]: I1125 21:47:55.351457 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:55 crc kubenswrapper[4910]: I1125 21:47:55.373900 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:47:55 crc kubenswrapper[4910]: I1125 21:47:55.497591 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:55 crc kubenswrapper[4910]: I1125 21:47:55.497674 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:47:55 crc kubenswrapper[4910]: I1125 21:47:55.838688 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.027013 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-combined-ca-bundle\") pod \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.027114 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9t5s8\" (UniqueName: \"kubernetes.io/projected/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-kube-api-access-9t5s8\") pod \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.027151 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-fernet-keys\") pod \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.027334 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-scripts\") pod \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.027941 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-credential-keys\") pod \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.028028 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-config-data\") pod \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\" (UID: \"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4\") " Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.036431 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4" (UID: "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.036605 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-kube-api-access-9t5s8" (OuterVolumeSpecName: "kube-api-access-9t5s8") pod "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4" (UID: "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4"). InnerVolumeSpecName "kube-api-access-9t5s8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.037002 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4" (UID: "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.046527 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-scripts" (OuterVolumeSpecName: "scripts") pod "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4" (UID: "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.066687 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4" (UID: "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.067547 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-config-data" (OuterVolumeSpecName: "config-data") pod "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4" (UID: "3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.132273 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.132787 4910 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.132805 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.132818 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.132830 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9t5s8\" (UniqueName: \"kubernetes.io/projected/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-kube-api-access-9t5s8\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.132844 4910 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.141010 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65f64c1b-8090-4f51-9a93-46a36ff28baa","Type":"ContainerStarted","Data":"d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd"} Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.143538 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-hzbxg" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.143638 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hzbxg" event={"ID":"3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4","Type":"ContainerDied","Data":"c538719ef8f220fb69f93d1f0ed065e2bad0a67fd827df7f93552883803ea642"} Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.143669 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c538719ef8f220fb69f93d1f0ed065e2bad0a67fd827df7f93552883803ea642" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.826410 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.897030 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.927763 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-6tggk"] Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.928477 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" podUID="40229997-b3af-4531-8bf1-e8ac2aed63e5" containerName="dnsmasq-dns" containerID="cri-o://b250e040972a20cdcbe57b4900dbadb24a5b68db30164a5e1d34cccae2015294" gracePeriod=10 Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.994518 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-f7657d4c8-n2wbh"] Nov 25 21:47:56 crc kubenswrapper[4910]: E1125 21:47:56.994977 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4" containerName="keystone-bootstrap" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.994996 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4" containerName="keystone-bootstrap" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.995159 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4" containerName="keystone-bootstrap" Nov 25 21:47:56 crc kubenswrapper[4910]: I1125 21:47:56.995840 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.006090 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.006310 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.006421 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.006551 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.006816 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.007488 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-v6h2m" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.011010 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f7657d4c8-n2wbh"] Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.033147 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" podUID="40229997-b3af-4531-8bf1-e8ac2aed63e5" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.140:5353: connect: connection refused" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.071513 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-scripts\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.071811 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-public-tls-certs\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.071898 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-combined-ca-bundle\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.071977 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-internal-tls-certs\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.072050 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqzd5\" (UniqueName: \"kubernetes.io/projected/1fc29606-ff34-4170-859a-8357838d9b65-kube-api-access-bqzd5\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " 
pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.072127 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-config-data\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.072218 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-credential-keys\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.072345 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-fernet-keys\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.162667 4910 generic.go:334] "Generic (PLEG): container finished" podID="40229997-b3af-4531-8bf1-e8ac2aed63e5" containerID="b250e040972a20cdcbe57b4900dbadb24a5b68db30164a5e1d34cccae2015294" exitCode=0 Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.162997 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" event={"ID":"40229997-b3af-4531-8bf1-e8ac2aed63e5","Type":"ContainerDied","Data":"b250e040972a20cdcbe57b4900dbadb24a5b68db30164a5e1d34cccae2015294"} Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.173826 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-scripts\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.174137 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-public-tls-certs\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.174232 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-combined-ca-bundle\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.174337 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-internal-tls-certs\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.174414 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqzd5\" (UniqueName: 
\"kubernetes.io/projected/1fc29606-ff34-4170-859a-8357838d9b65-kube-api-access-bqzd5\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.174497 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-config-data\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.174585 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-credential-keys\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.174690 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-fernet-keys\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.182521 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-config-data\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.183144 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-fernet-keys\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.183320 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-scripts\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.185839 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-internal-tls-certs\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.195908 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-combined-ca-bundle\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.196363 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-credential-keys\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 
21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.198813 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1fc29606-ff34-4170-859a-8357838d9b65-public-tls-certs\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.200763 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqzd5\" (UniqueName: \"kubernetes.io/projected/1fc29606-ff34-4170-859a-8357838d9b65-kube-api-access-bqzd5\") pod \"keystone-f7657d4c8-n2wbh\" (UID: \"1fc29606-ff34-4170-859a-8357838d9b65\") " pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.322605 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.410568 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.410627 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.456272 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.463209 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.473830 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.598345 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-sb\") pod \"40229997-b3af-4531-8bf1-e8ac2aed63e5\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.599603 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-config\") pod \"40229997-b3af-4531-8bf1-e8ac2aed63e5\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.599663 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-svc\") pod \"40229997-b3af-4531-8bf1-e8ac2aed63e5\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.599818 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-swift-storage-0\") pod \"40229997-b3af-4531-8bf1-e8ac2aed63e5\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.599866 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h6kj4\" (UniqueName: \"kubernetes.io/projected/40229997-b3af-4531-8bf1-e8ac2aed63e5-kube-api-access-h6kj4\") pod \"40229997-b3af-4531-8bf1-e8ac2aed63e5\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.599932 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-nb\") pod \"40229997-b3af-4531-8bf1-e8ac2aed63e5\" (UID: \"40229997-b3af-4531-8bf1-e8ac2aed63e5\") " Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.621967 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/40229997-b3af-4531-8bf1-e8ac2aed63e5-kube-api-access-h6kj4" (OuterVolumeSpecName: "kube-api-access-h6kj4") pod "40229997-b3af-4531-8bf1-e8ac2aed63e5" (UID: "40229997-b3af-4531-8bf1-e8ac2aed63e5"). InnerVolumeSpecName "kube-api-access-h6kj4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.657602 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-config" (OuterVolumeSpecName: "config") pod "40229997-b3af-4531-8bf1-e8ac2aed63e5" (UID: "40229997-b3af-4531-8bf1-e8ac2aed63e5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.659519 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "40229997-b3af-4531-8bf1-e8ac2aed63e5" (UID: "40229997-b3af-4531-8bf1-e8ac2aed63e5"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.685849 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "40229997-b3af-4531-8bf1-e8ac2aed63e5" (UID: "40229997-b3af-4531-8bf1-e8ac2aed63e5"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.686430 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "40229997-b3af-4531-8bf1-e8ac2aed63e5" (UID: "40229997-b3af-4531-8bf1-e8ac2aed63e5"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.698130 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "40229997-b3af-4531-8bf1-e8ac2aed63e5" (UID: "40229997-b3af-4531-8bf1-e8ac2aed63e5"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.704534 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.704571 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h6kj4\" (UniqueName: \"kubernetes.io/projected/40229997-b3af-4531-8bf1-e8ac2aed63e5-kube-api-access-h6kj4\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.704583 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.704592 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.704601 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:57 crc kubenswrapper[4910]: I1125 21:47:57.704610 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/40229997-b3af-4531-8bf1-e8ac2aed63e5-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.018758 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f7657d4c8-n2wbh"] Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.175698 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f7657d4c8-n2wbh" event={"ID":"1fc29606-ff34-4170-859a-8357838d9b65","Type":"ContainerStarted","Data":"7948416c76ed13c8616417fa6637d8f3290dcfab53ae9c0000e7f2c305520c39"} Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.177995 4910 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" event={"ID":"40229997-b3af-4531-8bf1-e8ac2aed63e5","Type":"ContainerDied","Data":"9a81f9ea3be2a0abcbbebfbe89135e951029c32f2dba34caa916a45b9f79d28d"} Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.178138 4910 scope.go:117] "RemoveContainer" containerID="b250e040972a20cdcbe57b4900dbadb24a5b68db30164a5e1d34cccae2015294" Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.178009 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57c957c4ff-6tggk" Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.187603 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-djprh" event={"ID":"909bc667-1a51-44ef-b676-dabab2050b4e","Type":"ContainerStarted","Data":"ac6c521f0abba3b2bd9e0c3410b7140b6c2fa69e307ad0aac6495f000a16ae5f"} Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.189355 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.189593 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.222675 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-djprh" podStartSLOduration=2.521331225 podStartE2EDuration="42.222648903s" podCreationTimestamp="2025-11-25 21:47:16 +0000 UTC" firstStartedPulling="2025-11-25 21:47:18.090049532 +0000 UTC m=+993.552525854" lastFinishedPulling="2025-11-25 21:47:57.79136721 +0000 UTC m=+1033.253843532" observedRunningTime="2025-11-25 21:47:58.210429029 +0000 UTC m=+1033.672905351" watchObservedRunningTime="2025-11-25 21:47:58.222648903 +0000 UTC m=+1033.685125225" Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.223381 4910 scope.go:117] "RemoveContainer" containerID="ec9056a732dbeffbfa763919006df073383e68dd1ed5e99330932a01d55e57ee" Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.270271 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-6tggk"] Nov 25 21:47:58 crc kubenswrapper[4910]: I1125 21:47:58.294797 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57c957c4ff-6tggk"] Nov 25 21:47:59 crc kubenswrapper[4910]: I1125 21:47:59.201335 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f7657d4c8-n2wbh" event={"ID":"1fc29606-ff34-4170-859a-8357838d9b65","Type":"ContainerStarted","Data":"4927e592893e4e59b334c98911356f6fa1a24ea9005312abe23b38e41f2a68fe"} Nov 25 21:47:59 crc kubenswrapper[4910]: I1125 21:47:59.202007 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:47:59 crc kubenswrapper[4910]: I1125 21:47:59.224031 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="40229997-b3af-4531-8bf1-e8ac2aed63e5" path="/var/lib/kubelet/pods/40229997-b3af-4531-8bf1-e8ac2aed63e5/volumes" Nov 25 21:47:59 crc kubenswrapper[4910]: I1125 21:47:59.866862 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 21:47:59 crc kubenswrapper[4910]: I1125 21:47:59.867459 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 21:47:59 crc kubenswrapper[4910]: I1125 
21:47:59.919484 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 21:47:59 crc kubenswrapper[4910]: I1125 21:47:59.929552 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 21:47:59 crc kubenswrapper[4910]: I1125 21:47:59.954215 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-f7657d4c8-n2wbh" podStartSLOduration=3.954181064 podStartE2EDuration="3.954181064s" podCreationTimestamp="2025-11-25 21:47:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:47:59.229121187 +0000 UTC m=+1034.691597509" watchObservedRunningTime="2025-11-25 21:47:59.954181064 +0000 UTC m=+1035.416657396" Nov 25 21:48:00 crc kubenswrapper[4910]: I1125 21:48:00.218653 4910 generic.go:334] "Generic (PLEG): container finished" podID="909bc667-1a51-44ef-b676-dabab2050b4e" containerID="ac6c521f0abba3b2bd9e0c3410b7140b6c2fa69e307ad0aac6495f000a16ae5f" exitCode=0 Nov 25 21:48:00 crc kubenswrapper[4910]: I1125 21:48:00.218750 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-djprh" event={"ID":"909bc667-1a51-44ef-b676-dabab2050b4e","Type":"ContainerDied","Data":"ac6c521f0abba3b2bd9e0c3410b7140b6c2fa69e307ad0aac6495f000a16ae5f"} Nov 25 21:48:00 crc kubenswrapper[4910]: I1125 21:48:00.221971 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-t82b5" event={"ID":"7e650049-c8bd-4a60-a1f7-1b022752ff7a","Type":"ContainerStarted","Data":"9b54dbe72aa3d0767700a882d35711169fd8d9ac869c6d870f7b28b2cd4ea119"} Nov 25 21:48:00 crc kubenswrapper[4910]: I1125 21:48:00.222022 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 21:48:00 crc kubenswrapper[4910]: I1125 21:48:00.223636 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 21:48:00 crc kubenswrapper[4910]: I1125 21:48:00.244857 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-t82b5" podStartSLOduration=4.2248683830000004 podStartE2EDuration="44.244831906s" podCreationTimestamp="2025-11-25 21:47:16 +0000 UTC" firstStartedPulling="2025-11-25 21:47:18.885777575 +0000 UTC m=+994.348253897" lastFinishedPulling="2025-11-25 21:47:58.905741098 +0000 UTC m=+1034.368217420" observedRunningTime="2025-11-25 21:48:00.242961756 +0000 UTC m=+1035.705438078" watchObservedRunningTime="2025-11-25 21:48:00.244831906 +0000 UTC m=+1035.707308228" Nov 25 21:48:00 crc kubenswrapper[4910]: I1125 21:48:00.325935 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 21:48:00 crc kubenswrapper[4910]: I1125 21:48:00.326580 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 21:48:02 crc kubenswrapper[4910]: I1125 21:48:02.244737 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-xx5nj" event={"ID":"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4","Type":"ContainerStarted","Data":"d18973c06dc26f92b8f4426573f9376ee1c34a857b14ef2f24c70116170a4388"} Nov 25 21:48:02 crc kubenswrapper[4910]: I1125 21:48:02.250168 4910 generic.go:334] "Generic (PLEG): container finished" 
podID="7e650049-c8bd-4a60-a1f7-1b022752ff7a" containerID="9b54dbe72aa3d0767700a882d35711169fd8d9ac869c6d870f7b28b2cd4ea119" exitCode=0 Nov 25 21:48:02 crc kubenswrapper[4910]: I1125 21:48:02.250301 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 21:48:02 crc kubenswrapper[4910]: I1125 21:48:02.250314 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 21:48:02 crc kubenswrapper[4910]: I1125 21:48:02.250973 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-t82b5" event={"ID":"7e650049-c8bd-4a60-a1f7-1b022752ff7a","Type":"ContainerDied","Data":"9b54dbe72aa3d0767700a882d35711169fd8d9ac869c6d870f7b28b2cd4ea119"} Nov 25 21:48:02 crc kubenswrapper[4910]: I1125 21:48:02.267591 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-xx5nj" podStartSLOduration=3.311855209 podStartE2EDuration="46.267570944s" podCreationTimestamp="2025-11-25 21:47:16 +0000 UTC" firstStartedPulling="2025-11-25 21:47:17.671488636 +0000 UTC m=+993.133964958" lastFinishedPulling="2025-11-25 21:48:00.627204371 +0000 UTC m=+1036.089680693" observedRunningTime="2025-11-25 21:48:02.266563087 +0000 UTC m=+1037.729039409" watchObservedRunningTime="2025-11-25 21:48:02.267570944 +0000 UTC m=+1037.730047256" Nov 25 21:48:02 crc kubenswrapper[4910]: I1125 21:48:02.865586 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 21:48:03 crc kubenswrapper[4910]: I1125 21:48:03.050812 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 21:48:05 crc kubenswrapper[4910]: I1125 21:48:05.375984 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-58f8d7cc56-csk7l" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.147:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.147:8443: connect: connection refused" Nov 25 21:48:05 crc kubenswrapper[4910]: I1125 21:48:05.499714 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-657976db8d-swkbt" podUID="7941e190-b648-4b11-946b-dddaa1bc98d9" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.148:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.148:8443: connect: connection refused" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.155256 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-djprh" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.163122 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-t82b5" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.263203 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rzn45\" (UniqueName: \"kubernetes.io/projected/7e650049-c8bd-4a60-a1f7-1b022752ff7a-kube-api-access-rzn45\") pod \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.263322 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-combined-ca-bundle\") pod \"909bc667-1a51-44ef-b676-dabab2050b4e\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.263365 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-combined-ca-bundle\") pod \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.263397 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/909bc667-1a51-44ef-b676-dabab2050b4e-logs\") pod \"909bc667-1a51-44ef-b676-dabab2050b4e\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.263448 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-db-sync-config-data\") pod \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\" (UID: \"7e650049-c8bd-4a60-a1f7-1b022752ff7a\") " Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.263467 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-config-data\") pod \"909bc667-1a51-44ef-b676-dabab2050b4e\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.263761 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-scripts\") pod \"909bc667-1a51-44ef-b676-dabab2050b4e\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.263824 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k4mpl\" (UniqueName: \"kubernetes.io/projected/909bc667-1a51-44ef-b676-dabab2050b4e-kube-api-access-k4mpl\") pod \"909bc667-1a51-44ef-b676-dabab2050b4e\" (UID: \"909bc667-1a51-44ef-b676-dabab2050b4e\") " Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.272609 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/909bc667-1a51-44ef-b676-dabab2050b4e-logs" (OuterVolumeSpecName: "logs") pod "909bc667-1a51-44ef-b676-dabab2050b4e" (UID: "909bc667-1a51-44ef-b676-dabab2050b4e"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.274274 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/909bc667-1a51-44ef-b676-dabab2050b4e-kube-api-access-k4mpl" (OuterVolumeSpecName: "kube-api-access-k4mpl") pod "909bc667-1a51-44ef-b676-dabab2050b4e" (UID: "909bc667-1a51-44ef-b676-dabab2050b4e"). InnerVolumeSpecName "kube-api-access-k4mpl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.274934 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "7e650049-c8bd-4a60-a1f7-1b022752ff7a" (UID: "7e650049-c8bd-4a60-a1f7-1b022752ff7a"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.280117 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-scripts" (OuterVolumeSpecName: "scripts") pod "909bc667-1a51-44ef-b676-dabab2050b4e" (UID: "909bc667-1a51-44ef-b676-dabab2050b4e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.280667 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e650049-c8bd-4a60-a1f7-1b022752ff7a-kube-api-access-rzn45" (OuterVolumeSpecName: "kube-api-access-rzn45") pod "7e650049-c8bd-4a60-a1f7-1b022752ff7a" (UID: "7e650049-c8bd-4a60-a1f7-1b022752ff7a"). InnerVolumeSpecName "kube-api-access-rzn45". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.311213 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "909bc667-1a51-44ef-b676-dabab2050b4e" (UID: "909bc667-1a51-44ef-b676-dabab2050b4e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.311354 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7e650049-c8bd-4a60-a1f7-1b022752ff7a" (UID: "7e650049-c8bd-4a60-a1f7-1b022752ff7a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.315973 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-t82b5" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.315975 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-t82b5" event={"ID":"7e650049-c8bd-4a60-a1f7-1b022752ff7a","Type":"ContainerDied","Data":"6035c8238dc642073c017d425714504c618bf109be08dce5c5f3b312f8204e8f"} Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.316058 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6035c8238dc642073c017d425714504c618bf109be08dce5c5f3b312f8204e8f" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.318988 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-djprh" event={"ID":"909bc667-1a51-44ef-b676-dabab2050b4e","Type":"ContainerDied","Data":"f39d8cb7e5f17559c0212e05269e4f5a1686e37e1bfdd6dec5baff21224ce0db"} Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.319015 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f39d8cb7e5f17559c0212e05269e4f5a1686e37e1bfdd6dec5baff21224ce0db" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.319075 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-djprh" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.330202 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-config-data" (OuterVolumeSpecName: "config-data") pod "909bc667-1a51-44ef-b676-dabab2050b4e" (UID: "909bc667-1a51-44ef-b676-dabab2050b4e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.366774 4910 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.366816 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.366834 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.366843 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k4mpl\" (UniqueName: \"kubernetes.io/projected/909bc667-1a51-44ef-b676-dabab2050b4e-kube-api-access-k4mpl\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.366855 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rzn45\" (UniqueName: \"kubernetes.io/projected/7e650049-c8bd-4a60-a1f7-1b022752ff7a-kube-api-access-rzn45\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.366865 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/909bc667-1a51-44ef-b676-dabab2050b4e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.366872 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7e650049-c8bd-4a60-a1f7-1b022752ff7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:06 crc kubenswrapper[4910]: I1125 21:48:06.366881 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/909bc667-1a51-44ef-b676-dabab2050b4e-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.518199 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-64ff96875d-p4n97"] Nov 25 21:48:07 crc kubenswrapper[4910]: E1125 21:48:07.518778 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="909bc667-1a51-44ef-b676-dabab2050b4e" containerName="placement-db-sync" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.518793 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="909bc667-1a51-44ef-b676-dabab2050b4e" containerName="placement-db-sync" Nov 25 21:48:07 crc kubenswrapper[4910]: E1125 21:48:07.518804 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40229997-b3af-4531-8bf1-e8ac2aed63e5" containerName="dnsmasq-dns" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.518809 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="40229997-b3af-4531-8bf1-e8ac2aed63e5" containerName="dnsmasq-dns" Nov 25 21:48:07 crc kubenswrapper[4910]: E1125 21:48:07.518835 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e650049-c8bd-4a60-a1f7-1b022752ff7a" containerName="barbican-db-sync" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.518842 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e650049-c8bd-4a60-a1f7-1b022752ff7a" containerName="barbican-db-sync" Nov 25 21:48:07 crc kubenswrapper[4910]: E1125 21:48:07.518855 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="40229997-b3af-4531-8bf1-e8ac2aed63e5" containerName="init" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.518860 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="40229997-b3af-4531-8bf1-e8ac2aed63e5" containerName="init" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.519025 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="40229997-b3af-4531-8bf1-e8ac2aed63e5" containerName="dnsmasq-dns" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.519035 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e650049-c8bd-4a60-a1f7-1b022752ff7a" containerName="barbican-db-sync" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.519057 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="909bc667-1a51-44ef-b676-dabab2050b4e" containerName="placement-db-sync" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.520009 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.521880 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.522957 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.523269 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-qtfh4" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.523661 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.524015 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.554909 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-64ff96875d-p4n97"] Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.592783 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-config-data\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.592871 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw4th\" (UniqueName: \"kubernetes.io/projected/d7f10efc-4222-4871-b684-dc482fd27b01-kube-api-access-bw4th\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.592907 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-scripts\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.592927 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-combined-ca-bundle\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.592986 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-internal-tls-certs\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.593004 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-public-tls-certs\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 
21:48:07.593033 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7f10efc-4222-4871-b684-dc482fd27b01-logs\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.607133 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-865b64f5bb-fdgzg"] Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.612433 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.615203 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.616447 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.619075 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-gpzm4" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.624151 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-865b64f5bb-fdgzg"] Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.644860 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7fdcb7f4c9-g5zfq"] Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.646445 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.649397 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.657298 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7fdcb7f4c9-g5zfq"] Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.694805 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7193b97a-2be1-4f8f-9e84-abb09908f78c-logs\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.694880 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-public-tls-certs\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.694956 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/463bc99a-ad40-4df5-9b99-d10d0af67cea-combined-ca-bundle\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.695026 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjcx4\" (UniqueName: 
\"kubernetes.io/projected/463bc99a-ad40-4df5-9b99-d10d0af67cea-kube-api-access-fjcx4\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.695055 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7f10efc-4222-4871-b684-dc482fd27b01-logs\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.695171 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-config-data\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.695207 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/463bc99a-ad40-4df5-9b99-d10d0af67cea-config-data\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.695485 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/463bc99a-ad40-4df5-9b99-d10d0af67cea-logs\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.695510 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d7f10efc-4222-4871-b684-dc482fd27b01-logs\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.695589 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw4th\" (UniqueName: \"kubernetes.io/projected/d7f10efc-4222-4871-b684-dc482fd27b01-kube-api-access-bw4th\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.695690 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7193b97a-2be1-4f8f-9e84-abb09908f78c-combined-ca-bundle\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.695780 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/463bc99a-ad40-4df5-9b99-d10d0af67cea-config-data-custom\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.695853 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-scripts\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.695928 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-combined-ca-bundle\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.696005 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7193b97a-2be1-4f8f-9e84-abb09908f78c-config-data-custom\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.696129 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7193b97a-2be1-4f8f-9e84-abb09908f78c-config-data\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.696286 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j9qmp\" (UniqueName: \"kubernetes.io/projected/7193b97a-2be1-4f8f-9e84-abb09908f78c-kube-api-access-j9qmp\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.696349 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-internal-tls-certs\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.701560 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-public-tls-certs\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.703302 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-combined-ca-bundle\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.713583 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-scripts\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.714109 4910 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-config-data\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.727946 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d7f10efc-4222-4871-b684-dc482fd27b01-internal-tls-certs\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.735932 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw4th\" (UniqueName: \"kubernetes.io/projected/d7f10efc-4222-4871-b684-dc482fd27b01-kube-api-access-bw4th\") pod \"placement-64ff96875d-p4n97\" (UID: \"d7f10efc-4222-4871-b684-dc482fd27b01\") " pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.803731 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/463bc99a-ad40-4df5-9b99-d10d0af67cea-config-data-custom\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.803816 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7193b97a-2be1-4f8f-9e84-abb09908f78c-config-data-custom\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.803866 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7193b97a-2be1-4f8f-9e84-abb09908f78c-config-data\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.803911 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j9qmp\" (UniqueName: \"kubernetes.io/projected/7193b97a-2be1-4f8f-9e84-abb09908f78c-kube-api-access-j9qmp\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.803936 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7193b97a-2be1-4f8f-9e84-abb09908f78c-logs\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.803964 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/463bc99a-ad40-4df5-9b99-d10d0af67cea-combined-ca-bundle\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 
21:48:07.803987 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjcx4\" (UniqueName: \"kubernetes.io/projected/463bc99a-ad40-4df5-9b99-d10d0af67cea-kube-api-access-fjcx4\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.804022 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/463bc99a-ad40-4df5-9b99-d10d0af67cea-config-data\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.804094 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/463bc99a-ad40-4df5-9b99-d10d0af67cea-logs\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.804135 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7193b97a-2be1-4f8f-9e84-abb09908f78c-combined-ca-bundle\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.816908 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7193b97a-2be1-4f8f-9e84-abb09908f78c-logs\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.817189 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7193b97a-2be1-4f8f-9e84-abb09908f78c-combined-ca-bundle\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.818164 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/7193b97a-2be1-4f8f-9e84-abb09908f78c-config-data-custom\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.826377 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7193b97a-2be1-4f8f-9e84-abb09908f78c-config-data\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.830871 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/463bc99a-ad40-4df5-9b99-d10d0af67cea-config-data-custom\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " 
pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.831473 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/463bc99a-ad40-4df5-9b99-d10d0af67cea-logs\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.838862 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/463bc99a-ad40-4df5-9b99-d10d0af67cea-combined-ca-bundle\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.840861 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.848521 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/463bc99a-ad40-4df5-9b99-d10d0af67cea-config-data\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.854534 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j9qmp\" (UniqueName: \"kubernetes.io/projected/7193b97a-2be1-4f8f-9e84-abb09908f78c-kube-api-access-j9qmp\") pod \"barbican-keystone-listener-865b64f5bb-fdgzg\" (UID: \"7193b97a-2be1-4f8f-9e84-abb09908f78c\") " pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.859961 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjcx4\" (UniqueName: \"kubernetes.io/projected/463bc99a-ad40-4df5-9b99-d10d0af67cea-kube-api-access-fjcx4\") pod \"barbican-worker-7fdcb7f4c9-g5zfq\" (UID: \"463bc99a-ad40-4df5-9b99-d10d0af67cea\") " pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.871366 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-rnkk5"] Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.872869 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.906081 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.906461 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.906492 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-config\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.906521 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.906808 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-svc\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.906851 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8lsz\" (UniqueName: \"kubernetes.io/projected/12008eb5-3863-4f4d-af56-1f10d685ae3d-kube-api-access-m8lsz\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.908005 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-rnkk5"] Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.934595 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" Nov 25 21:48:07 crc kubenswrapper[4910]: I1125 21:48:07.969743 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.009644 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7c8449c4cd-d4lrg"] Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.010532 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.010715 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.010787 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-config\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.010841 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.010968 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-svc\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.011053 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8lsz\" (UniqueName: \"kubernetes.io/projected/12008eb5-3863-4f4d-af56-1f10d685ae3d-kube-api-access-m8lsz\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.013991 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-swift-storage-0\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.014778 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-nb\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.016402 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-config\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.017106 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-sb\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.018479 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-svc\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.034862 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.038134 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7c8449c4cd-d4lrg"] Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.043816 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.059950 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8lsz\" (UniqueName: \"kubernetes.io/projected/12008eb5-3863-4f4d-af56-1f10d685ae3d-kube-api-access-m8lsz\") pod \"dnsmasq-dns-688c87cc99-rnkk5\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.115018 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1036586-470f-4688-a73b-b2849eae1c02-logs\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.116311 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.116423 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-combined-ca-bundle\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.116461 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data-custom\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.116515 
4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-44n8b\" (UniqueName: \"kubernetes.io/projected/b1036586-470f-4688-a73b-b2849eae1c02-kube-api-access-44n8b\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.218353 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-combined-ca-bundle\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.218398 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data-custom\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.218425 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-44n8b\" (UniqueName: \"kubernetes.io/projected/b1036586-470f-4688-a73b-b2849eae1c02-kube-api-access-44n8b\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.218485 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1036586-470f-4688-a73b-b2849eae1c02-logs\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.218579 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.220674 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1036586-470f-4688-a73b-b2849eae1c02-logs\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.222824 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.224337 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-combined-ca-bundle\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.231154 4910 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data-custom\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.238065 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-44n8b\" (UniqueName: \"kubernetes.io/projected/b1036586-470f-4688-a73b-b2849eae1c02-kube-api-access-44n8b\") pod \"barbican-api-7c8449c4cd-d4lrg\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.297413 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.389928 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:08 crc kubenswrapper[4910]: E1125 21:48:08.710152 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" Nov 25 21:48:08 crc kubenswrapper[4910]: I1125 21:48:08.997231 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-865b64f5bb-fdgzg"] Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.020079 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-64ff96875d-p4n97"] Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.115624 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-rnkk5"] Nov 25 21:48:09 crc kubenswrapper[4910]: W1125 21:48:09.129064 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod12008eb5_3863_4f4d_af56_1f10d685ae3d.slice/crio-2b2d2cfcc1989a61fd54b6aa9ad7519fe1d37538a2db821f74f323945297ae52 WatchSource:0}: Error finding container 2b2d2cfcc1989a61fd54b6aa9ad7519fe1d37538a2db821f74f323945297ae52: Status 404 returned error can't find the container with id 2b2d2cfcc1989a61fd54b6aa9ad7519fe1d37538a2db821f74f323945297ae52 Nov 25 21:48:09 crc kubenswrapper[4910]: W1125 21:48:09.134715 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod463bc99a_ad40_4df5_9b99_d10d0af67cea.slice/crio-b7929dd54183ff42ae80da50222edf6e3eb8a65dfecb725d271e8920b03bb7ea WatchSource:0}: Error finding container b7929dd54183ff42ae80da50222edf6e3eb8a65dfecb725d271e8920b03bb7ea: Status 404 returned error can't find the container with id b7929dd54183ff42ae80da50222edf6e3eb8a65dfecb725d271e8920b03bb7ea Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.138174 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7fdcb7f4c9-g5zfq"] Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.262346 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7c8449c4cd-d4lrg"] Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.368061 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" 
event={"ID":"7193b97a-2be1-4f8f-9e84-abb09908f78c","Type":"ContainerStarted","Data":"37194d33e19a1931d3e15233bc65ba62bfb3b449691a7951bac9abc37739b49c"} Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.369392 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" event={"ID":"463bc99a-ad40-4df5-9b99-d10d0af67cea","Type":"ContainerStarted","Data":"b7929dd54183ff42ae80da50222edf6e3eb8a65dfecb725d271e8920b03bb7ea"} Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.373275 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65f64c1b-8090-4f51-9a93-46a36ff28baa","Type":"ContainerStarted","Data":"e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840"} Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.373471 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.373458 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="ceilometer-notification-agent" containerID="cri-o://d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf" gracePeriod=30 Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.373536 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="sg-core" containerID="cri-o://d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd" gracePeriod=30 Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.373540 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="proxy-httpd" containerID="cri-o://e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840" gracePeriod=30 Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.381965 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" event={"ID":"12008eb5-3863-4f4d-af56-1f10d685ae3d","Type":"ContainerStarted","Data":"bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf"} Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.382257 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" event={"ID":"12008eb5-3863-4f4d-af56-1f10d685ae3d","Type":"ContainerStarted","Data":"2b2d2cfcc1989a61fd54b6aa9ad7519fe1d37538a2db821f74f323945297ae52"} Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.388063 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-64ff96875d-p4n97" event={"ID":"d7f10efc-4222-4871-b684-dc482fd27b01","Type":"ContainerStarted","Data":"3b733a82b5de8cf3ee5427545143469e47dcd03ade49f3b53ec7d94d3ab58002"} Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.388162 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-64ff96875d-p4n97" event={"ID":"d7f10efc-4222-4871-b684-dc482fd27b01","Type":"ContainerStarted","Data":"d62273568dd8eedd3b49c0ce477781fbb1b496cf9842b1dcb075b708a515d370"} Nov 25 21:48:09 crc kubenswrapper[4910]: I1125 21:48:09.390634 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c8449c4cd-d4lrg" event={"ID":"b1036586-470f-4688-a73b-b2849eae1c02","Type":"ContainerStarted","Data":"6fced21950fbb1dc3de47c3ca3d39d05b0197b78f7b78acf049a41058450aff7"} Nov 25 
21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.426149 4910 generic.go:334] "Generic (PLEG): container finished" podID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerID="e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840" exitCode=0 Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.426805 4910 generic.go:334] "Generic (PLEG): container finished" podID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerID="d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd" exitCode=2 Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.426213 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65f64c1b-8090-4f51-9a93-46a36ff28baa","Type":"ContainerDied","Data":"e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840"} Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.426903 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65f64c1b-8090-4f51-9a93-46a36ff28baa","Type":"ContainerDied","Data":"d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd"} Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.439705 4910 generic.go:334] "Generic (PLEG): container finished" podID="12008eb5-3863-4f4d-af56-1f10d685ae3d" containerID="bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf" exitCode=0 Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.439839 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" event={"ID":"12008eb5-3863-4f4d-af56-1f10d685ae3d","Type":"ContainerDied","Data":"bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf"} Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.455085 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-64ff96875d-p4n97" event={"ID":"d7f10efc-4222-4871-b684-dc482fd27b01","Type":"ContainerStarted","Data":"ce9d927fb12b860296d27508501f2d80b98e38f86222507625f8ab8ef7d708e2"} Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.456291 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.456358 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.470318 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c8449c4cd-d4lrg" event={"ID":"b1036586-470f-4688-a73b-b2849eae1c02","Type":"ContainerStarted","Data":"54a44786b30f9faa0d22af189569a75983882027794fadd099f9b3bcd5cfeeda"} Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.470372 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c8449c4cd-d4lrg" event={"ID":"b1036586-470f-4688-a73b-b2849eae1c02","Type":"ContainerStarted","Data":"fe52d772bd4be9a08daa027563de6a2534c4b03941b5704d73529cf17d9c3225"} Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.471682 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.474819 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.490865 4910 generic.go:334] "Generic (PLEG): container finished" podID="d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" 
containerID="d18973c06dc26f92b8f4426573f9376ee1c34a857b14ef2f24c70116170a4388" exitCode=0 Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.490916 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-xx5nj" event={"ID":"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4","Type":"ContainerDied","Data":"d18973c06dc26f92b8f4426573f9376ee1c34a857b14ef2f24c70116170a4388"} Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.520892 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-64ff96875d-p4n97" podStartSLOduration=3.520866513 podStartE2EDuration="3.520866513s" podCreationTimestamp="2025-11-25 21:48:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:10.50229453 +0000 UTC m=+1045.964770842" watchObservedRunningTime="2025-11-25 21:48:10.520866513 +0000 UTC m=+1045.983342835" Nov 25 21:48:10 crc kubenswrapper[4910]: I1125 21:48:10.541588 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7c8449c4cd-d4lrg" podStartSLOduration=3.541565212 podStartE2EDuration="3.541565212s" podCreationTimestamp="2025-11-25 21:48:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:10.536687732 +0000 UTC m=+1045.999164054" watchObservedRunningTime="2025-11-25 21:48:10.541565212 +0000 UTC m=+1046.004041534" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.035335 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-7ccccf649d-9sm5c"] Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.040680 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7ccccf649d-9sm5c"] Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.040803 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.045275 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.045507 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.144580 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-config-data-custom\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.144631 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe39bdcb-17f5-455e-89af-d161d0d651fc-logs\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.144656 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-internal-tls-certs\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.144676 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-public-tls-certs\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.144721 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjc9m\" (UniqueName: \"kubernetes.io/projected/fe39bdcb-17f5-455e-89af-d161d0d651fc-kube-api-access-bjc9m\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.144736 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-combined-ca-bundle\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.144761 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-config-data\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.246453 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjc9m\" (UniqueName: \"kubernetes.io/projected/fe39bdcb-17f5-455e-89af-d161d0d651fc-kube-api-access-bjc9m\") 
pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.246501 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-combined-ca-bundle\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.246531 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-config-data\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.246641 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-config-data-custom\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.246659 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe39bdcb-17f5-455e-89af-d161d0d651fc-logs\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.246681 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-internal-tls-certs\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.246699 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-public-tls-certs\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.249174 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fe39bdcb-17f5-455e-89af-d161d0d651fc-logs\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.255009 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-internal-tls-certs\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.256078 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-public-tls-certs\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") 
" pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.257211 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-combined-ca-bundle\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.257358 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-config-data\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.266517 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjc9m\" (UniqueName: \"kubernetes.io/projected/fe39bdcb-17f5-455e-89af-d161d0d651fc-kube-api-access-bjc9m\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.279388 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/fe39bdcb-17f5-455e-89af-d161d0d651fc-config-data-custom\") pod \"barbican-api-7ccccf649d-9sm5c\" (UID: \"fe39bdcb-17f5-455e-89af-d161d0d651fc\") " pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:11 crc kubenswrapper[4910]: I1125 21:48:11.398727 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.153087 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.278272 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-config-data\") pod \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.278816 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-etc-machine-id\") pod \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.279095 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-scripts\") pod \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.279115 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-combined-ca-bundle\") pod \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.279157 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntfr4\" (UniqueName: \"kubernetes.io/projected/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-kube-api-access-ntfr4\") pod \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.279201 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-db-sync-config-data\") pod \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\" (UID: \"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4\") " Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.281620 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" (UID: "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.294935 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-scripts" (OuterVolumeSpecName: "scripts") pod "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" (UID: "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.295159 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-kube-api-access-ntfr4" (OuterVolumeSpecName: "kube-api-access-ntfr4") pod "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" (UID: "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4"). InnerVolumeSpecName "kube-api-access-ntfr4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.302683 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" (UID: "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.382354 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.382394 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntfr4\" (UniqueName: \"kubernetes.io/projected/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-kube-api-access-ntfr4\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.382407 4910 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.382418 4910 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.414520 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" (UID: "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.442527 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-config-data" (OuterVolumeSpecName: "config-data") pod "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" (UID: "d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.484977 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.485018 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.519447 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-xx5nj" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.519445 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-xx5nj" event={"ID":"d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4","Type":"ContainerDied","Data":"26581e806db35109974db675455d7197163b70988b19ca60d0967fba2494f66f"} Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.519551 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="26581e806db35109974db675455d7197163b70988b19ca60d0967fba2494f66f" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.523998 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" event={"ID":"463bc99a-ad40-4df5-9b99-d10d0af67cea","Type":"ContainerStarted","Data":"b290536e3fdbc7a7c613544347b0a062bf36cfe6c34b53c8f0caec43697b6b19"} Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.533041 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" event={"ID":"12008eb5-3863-4f4d-af56-1f10d685ae3d","Type":"ContainerStarted","Data":"2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6"} Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.533842 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.558422 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" podStartSLOduration=5.558387273 podStartE2EDuration="5.558387273s" podCreationTimestamp="2025-11-25 21:48:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:12.555642261 +0000 UTC m=+1048.018118583" watchObservedRunningTime="2025-11-25 21:48:12.558387273 +0000 UTC m=+1048.020863595" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.643607 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-7ccccf649d-9sm5c"] Nov 25 21:48:12 crc kubenswrapper[4910]: W1125 21:48:12.646912 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe39bdcb_17f5_455e_89af_d161d0d651fc.slice/crio-bb7fdb8b6b4002d40d3937faa58fbea8ba679181aa41feb3281f07b3d96504fc WatchSource:0}: Error finding container bb7fdb8b6b4002d40d3937faa58fbea8ba679181aa41feb3281f07b3d96504fc: Status 404 returned error can't find the container with id bb7fdb8b6b4002d40d3937faa58fbea8ba679181aa41feb3281f07b3d96504fc Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.845732 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 21:48:12 crc kubenswrapper[4910]: E1125 21:48:12.846571 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" containerName="cinder-db-sync" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.846590 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" containerName="cinder-db-sync" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.846788 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" containerName="cinder-db-sync" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.847757 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.850337 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-ctxhm" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.850870 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.851011 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.851224 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.886417 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.896308 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.896381 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-scripts\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.896402 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.896433 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/247c2c17-ca52-421a-b739-7926362deff2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.896457 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xmcv\" (UniqueName: \"kubernetes.io/projected/247c2c17-ca52-421a-b739-7926362deff2-kube-api-access-8xmcv\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.896502 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.988258 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-rnkk5"] Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.999598 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.999701 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-scripts\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.999727 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.999762 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/247c2c17-ca52-421a-b739-7926362deff2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.999789 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xmcv\" (UniqueName: \"kubernetes.io/projected/247c2c17-ca52-421a-b739-7926362deff2-kube-api-access-8xmcv\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:12 crc kubenswrapper[4910]: I1125 21:48:12.999847 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.002980 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/247c2c17-ca52-421a-b739-7926362deff2-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.004613 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-vdpzb"] Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.005882 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.006549 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.007795 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.014868 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-scripts\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.022256 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.029834 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-vdpzb"] Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.033082 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xmcv\" (UniqueName: \"kubernetes.io/projected/247c2c17-ca52-421a-b739-7926362deff2-kube-api-access-8xmcv\") pod \"cinder-scheduler-0\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.095572 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.097825 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.103964 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.105045 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.105119 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.105164 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-config\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.105206 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzx9r\" (UniqueName: \"kubernetes.io/projected/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-kube-api-access-kzx9r\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.105230 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.105301 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.129071 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.179624 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.206879 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.206928 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2wpx\" (UniqueName: \"kubernetes.io/projected/2484eb65-ca04-42eb-a5fc-fed7998fc762-kube-api-access-n2wpx\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.206963 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.206986 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-scripts\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.207022 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data-custom\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.207048 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-config\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.207098 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzx9r\" (UniqueName: \"kubernetes.io/projected/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-kube-api-access-kzx9r\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.207126 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.207147 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2484eb65-ca04-42eb-a5fc-fed7998fc762-logs\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 
21:48:13.207179 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.207220 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.207266 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.207290 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2484eb65-ca04-42eb-a5fc-fed7998fc762-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.209991 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-nb\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.210336 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-config\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.210395 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-swift-storage-0\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.210945 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-sb\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.211437 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-svc\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.236308 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzx9r\" (UniqueName: 
\"kubernetes.io/projected/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-kube-api-access-kzx9r\") pod \"dnsmasq-dns-6bb4fc677f-vdpzb\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.309889 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.310025 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.310050 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2484eb65-ca04-42eb-a5fc-fed7998fc762-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.310136 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2wpx\" (UniqueName: \"kubernetes.io/projected/2484eb65-ca04-42eb-a5fc-fed7998fc762-kube-api-access-n2wpx\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.310211 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-scripts\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.310275 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data-custom\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.310327 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2484eb65-ca04-42eb-a5fc-fed7998fc762-logs\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.314444 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2484eb65-ca04-42eb-a5fc-fed7998fc762-etc-machine-id\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.314896 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2484eb65-ca04-42eb-a5fc-fed7998fc762-logs\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.316189 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.318924 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data-custom\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.319650 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-scripts\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.327114 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.328626 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.343860 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2wpx\" (UniqueName: \"kubernetes.io/projected/2484eb65-ca04-42eb-a5fc-fed7998fc762-kube-api-access-n2wpx\") pod \"cinder-api-0\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.427281 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.567127 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" event={"ID":"463bc99a-ad40-4df5-9b99-d10d0af67cea","Type":"ContainerStarted","Data":"c40d855a11e9ec6446240b6d0043a97783aef74b9e25cc50182f2594ce6f2934"} Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.572701 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7ccccf649d-9sm5c" event={"ID":"fe39bdcb-17f5-455e-89af-d161d0d651fc","Type":"ContainerStarted","Data":"4b8d78a69b3a6adb5a22aa122e03738163c49b008a8d020096873be272af45b4"} Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.572744 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7ccccf649d-9sm5c" event={"ID":"fe39bdcb-17f5-455e-89af-d161d0d651fc","Type":"ContainerStarted","Data":"33145691678d2dac2c6adf8981d3a1f01a4ffe3d7a44d35df2536aabdc3c3733"} Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.572756 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7ccccf649d-9sm5c" event={"ID":"fe39bdcb-17f5-455e-89af-d161d0d651fc","Type":"ContainerStarted","Data":"bb7fdb8b6b4002d40d3937faa58fbea8ba679181aa41feb3281f07b3d96504fc"} Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.573535 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.573565 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.575777 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" event={"ID":"7193b97a-2be1-4f8f-9e84-abb09908f78c","Type":"ContainerStarted","Data":"6bcf827cc7047e129e0e6fb7f1627f2a3c1dce6efaf01297273c2880beae55b9"} Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.575838 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" event={"ID":"7193b97a-2be1-4f8f-9e84-abb09908f78c","Type":"ContainerStarted","Data":"58bf8b32c1ac81004cf2653e0761c5682e9d98e6a630a0d269800a7085d83787"} Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.606791 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7fdcb7f4c9-g5zfq" podStartSLOduration=3.6769347850000003 podStartE2EDuration="6.60677084s" podCreationTimestamp="2025-11-25 21:48:07 +0000 UTC" firstStartedPulling="2025-11-25 21:48:09.139069131 +0000 UTC m=+1044.601545443" lastFinishedPulling="2025-11-25 21:48:12.068905176 +0000 UTC m=+1047.531381498" observedRunningTime="2025-11-25 21:48:13.59999918 +0000 UTC m=+1049.062475502" watchObservedRunningTime="2025-11-25 21:48:13.60677084 +0000 UTC m=+1049.069247162" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.630581 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-865b64f5bb-fdgzg" podStartSLOduration=3.5835216560000003 podStartE2EDuration="6.630560841s" podCreationTimestamp="2025-11-25 21:48:07 +0000 UTC" firstStartedPulling="2025-11-25 21:48:09.021905082 +0000 UTC m=+1044.484381404" lastFinishedPulling="2025-11-25 21:48:12.068944267 +0000 UTC m=+1047.531420589" observedRunningTime="2025-11-25 21:48:13.626893064 +0000 UTC m=+1049.089369386" 
watchObservedRunningTime="2025-11-25 21:48:13.630560841 +0000 UTC m=+1049.093037153" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.655620 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-7ccccf649d-9sm5c" podStartSLOduration=3.655599015 podStartE2EDuration="3.655599015s" podCreationTimestamp="2025-11-25 21:48:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:13.653191822 +0000 UTC m=+1049.115668144" watchObservedRunningTime="2025-11-25 21:48:13.655599015 +0000 UTC m=+1049.118075337" Nov 25 21:48:13 crc kubenswrapper[4910]: I1125 21:48:13.726532 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 21:48:14 crc kubenswrapper[4910]: W1125 21:48:14.048689 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod02bb38e5_05ca_49a5_a6f1_9af44e9b8fe7.slice/crio-d2259b64c577c53300ea38c48132124f76b6c38854c65f7397ab80c0da193d1f WatchSource:0}: Error finding container d2259b64c577c53300ea38c48132124f76b6c38854c65f7397ab80c0da193d1f: Status 404 returned error can't find the container with id d2259b64c577c53300ea38c48132124f76b6c38854c65f7397ab80c0da193d1f Nov 25 21:48:14 crc kubenswrapper[4910]: I1125 21:48:14.054726 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-vdpzb"] Nov 25 21:48:14 crc kubenswrapper[4910]: W1125 21:48:14.167724 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2484eb65_ca04_42eb_a5fc_fed7998fc762.slice/crio-c2ecf1a52722495a3179712ed48f766bbff20a4146958b2d5cdfa77bca064063 WatchSource:0}: Error finding container c2ecf1a52722495a3179712ed48f766bbff20a4146958b2d5cdfa77bca064063: Status 404 returned error can't find the container with id c2ecf1a52722495a3179712ed48f766bbff20a4146958b2d5cdfa77bca064063 Nov 25 21:48:14 crc kubenswrapper[4910]: I1125 21:48:14.169093 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 21:48:14 crc kubenswrapper[4910]: I1125 21:48:14.598379 4910 generic.go:334] "Generic (PLEG): container finished" podID="02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" containerID="3605e1b1778bd90b1d46db0d58970fee92cdd412c902c599ace17bf8779b9e1e" exitCode=0 Nov 25 21:48:14 crc kubenswrapper[4910]: I1125 21:48:14.598454 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" event={"ID":"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7","Type":"ContainerDied","Data":"3605e1b1778bd90b1d46db0d58970fee92cdd412c902c599ace17bf8779b9e1e"} Nov 25 21:48:14 crc kubenswrapper[4910]: I1125 21:48:14.598871 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" event={"ID":"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7","Type":"ContainerStarted","Data":"d2259b64c577c53300ea38c48132124f76b6c38854c65f7397ab80c0da193d1f"} Nov 25 21:48:14 crc kubenswrapper[4910]: I1125 21:48:14.605873 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2484eb65-ca04-42eb-a5fc-fed7998fc762","Type":"ContainerStarted","Data":"c2ecf1a52722495a3179712ed48f766bbff20a4146958b2d5cdfa77bca064063"} Nov 25 21:48:14 crc kubenswrapper[4910]: I1125 21:48:14.614818 4910 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" podUID="12008eb5-3863-4f4d-af56-1f10d685ae3d" containerName="dnsmasq-dns" containerID="cri-o://2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6" gracePeriod=10 Nov 25 21:48:14 crc kubenswrapper[4910]: I1125 21:48:14.615359 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"247c2c17-ca52-421a-b739-7926362deff2","Type":"ContainerStarted","Data":"9860501c3cc14d346471cd9960869ef2692b0d352ac42645a35f06bf0794985b"} Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.248678 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.382665 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-sb\") pod \"12008eb5-3863-4f4d-af56-1f10d685ae3d\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.382956 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-swift-storage-0\") pod \"12008eb5-3863-4f4d-af56-1f10d685ae3d\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.383071 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-config\") pod \"12008eb5-3863-4f4d-af56-1f10d685ae3d\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.383221 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8lsz\" (UniqueName: \"kubernetes.io/projected/12008eb5-3863-4f4d-af56-1f10d685ae3d-kube-api-access-m8lsz\") pod \"12008eb5-3863-4f4d-af56-1f10d685ae3d\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.383404 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-svc\") pod \"12008eb5-3863-4f4d-af56-1f10d685ae3d\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.383499 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-nb\") pod \"12008eb5-3863-4f4d-af56-1f10d685ae3d\" (UID: \"12008eb5-3863-4f4d-af56-1f10d685ae3d\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.400496 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12008eb5-3863-4f4d-af56-1f10d685ae3d-kube-api-access-m8lsz" (OuterVolumeSpecName: "kube-api-access-m8lsz") pod "12008eb5-3863-4f4d-af56-1f10d685ae3d" (UID: "12008eb5-3863-4f4d-af56-1f10d685ae3d"). InnerVolumeSpecName "kube-api-access-m8lsz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.476060 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-config" (OuterVolumeSpecName: "config") pod "12008eb5-3863-4f4d-af56-1f10d685ae3d" (UID: "12008eb5-3863-4f4d-af56-1f10d685ae3d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.486870 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.486897 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8lsz\" (UniqueName: \"kubernetes.io/projected/12008eb5-3863-4f4d-af56-1f10d685ae3d-kube-api-access-m8lsz\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.501919 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "12008eb5-3863-4f4d-af56-1f10d685ae3d" (UID: "12008eb5-3863-4f4d-af56-1f10d685ae3d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.511807 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "12008eb5-3863-4f4d-af56-1f10d685ae3d" (UID: "12008eb5-3863-4f4d-af56-1f10d685ae3d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.560878 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "12008eb5-3863-4f4d-af56-1f10d685ae3d" (UID: "12008eb5-3863-4f4d-af56-1f10d685ae3d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.564379 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "12008eb5-3863-4f4d-af56-1f10d685ae3d" (UID: "12008eb5-3863-4f4d-af56-1f10d685ae3d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.572619 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.588711 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.588750 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.588760 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.588769 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/12008eb5-3863-4f4d-af56-1f10d685ae3d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.647537 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" event={"ID":"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7","Type":"ContainerStarted","Data":"5e034bfb2e7ec8b0f89a5e253c7baab4a4c9b84474ae1d16c4683401e9054436"} Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.647699 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.650936 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2484eb65-ca04-42eb-a5fc-fed7998fc762","Type":"ContainerStarted","Data":"5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b"} Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.665413 4910 generic.go:334] "Generic (PLEG): container finished" podID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerID="d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf" exitCode=0 Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.665478 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65f64c1b-8090-4f51-9a93-46a36ff28baa","Type":"ContainerDied","Data":"d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf"} Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.665503 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"65f64c1b-8090-4f51-9a93-46a36ff28baa","Type":"ContainerDied","Data":"d14f3de7bca5a06177ef6458ea84c385817edc1674f9318cd8689701116e7014"} Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.665525 4910 scope.go:117] "RemoveContainer" containerID="e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.665702 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.680033 4910 generic.go:334] "Generic (PLEG): container finished" podID="12008eb5-3863-4f4d-af56-1f10d685ae3d" containerID="2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6" exitCode=0 Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.680099 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" event={"ID":"12008eb5-3863-4f4d-af56-1f10d685ae3d","Type":"ContainerDied","Data":"2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6"} Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.680147 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" event={"ID":"12008eb5-3863-4f4d-af56-1f10d685ae3d","Type":"ContainerDied","Data":"2b2d2cfcc1989a61fd54b6aa9ad7519fe1d37538a2db821f74f323945297ae52"} Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.680253 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688c87cc99-rnkk5" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.690777 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-sg-core-conf-yaml\") pod \"65f64c1b-8090-4f51-9a93-46a36ff28baa\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.690849 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-log-httpd\") pod \"65f64c1b-8090-4f51-9a93-46a36ff28baa\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.690978 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-combined-ca-bundle\") pod \"65f64c1b-8090-4f51-9a93-46a36ff28baa\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.691026 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-config-data\") pod \"65f64c1b-8090-4f51-9a93-46a36ff28baa\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.691118 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-scripts\") pod \"65f64c1b-8090-4f51-9a93-46a36ff28baa\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.691204 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzqdt\" (UniqueName: \"kubernetes.io/projected/65f64c1b-8090-4f51-9a93-46a36ff28baa-kube-api-access-bzqdt\") pod \"65f64c1b-8090-4f51-9a93-46a36ff28baa\" (UID: \"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.691268 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-run-httpd\") pod \"65f64c1b-8090-4f51-9a93-46a36ff28baa\" (UID: 
\"65f64c1b-8090-4f51-9a93-46a36ff28baa\") " Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.692100 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "65f64c1b-8090-4f51-9a93-46a36ff28baa" (UID: "65f64c1b-8090-4f51-9a93-46a36ff28baa"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.693860 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "65f64c1b-8090-4f51-9a93-46a36ff28baa" (UID: "65f64c1b-8090-4f51-9a93-46a36ff28baa"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.709640 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-scripts" (OuterVolumeSpecName: "scripts") pod "65f64c1b-8090-4f51-9a93-46a36ff28baa" (UID: "65f64c1b-8090-4f51-9a93-46a36ff28baa"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.713054 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65f64c1b-8090-4f51-9a93-46a36ff28baa-kube-api-access-bzqdt" (OuterVolumeSpecName: "kube-api-access-bzqdt") pod "65f64c1b-8090-4f51-9a93-46a36ff28baa" (UID: "65f64c1b-8090-4f51-9a93-46a36ff28baa"). InnerVolumeSpecName "kube-api-access-bzqdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.738212 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" podStartSLOduration=3.738182851 podStartE2EDuration="3.738182851s" podCreationTimestamp="2025-11-25 21:48:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:15.676651348 +0000 UTC m=+1051.139127670" watchObservedRunningTime="2025-11-25 21:48:15.738182851 +0000 UTC m=+1051.200659163" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.739401 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.754811 4910 scope.go:117] "RemoveContainer" containerID="d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.761681 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-rnkk5"] Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.782924 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688c87cc99-rnkk5"] Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.784914 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "65f64c1b-8090-4f51-9a93-46a36ff28baa" (UID: "65f64c1b-8090-4f51-9a93-46a36ff28baa"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.796301 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.796522 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzqdt\" (UniqueName: \"kubernetes.io/projected/65f64c1b-8090-4f51-9a93-46a36ff28baa-kube-api-access-bzqdt\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.796598 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.796657 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.796710 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/65f64c1b-8090-4f51-9a93-46a36ff28baa-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.824496 4910 scope.go:117] "RemoveContainer" containerID="d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.833543 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "65f64c1b-8090-4f51-9a93-46a36ff28baa" (UID: "65f64c1b-8090-4f51-9a93-46a36ff28baa"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.842363 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-config-data" (OuterVolumeSpecName: "config-data") pod "65f64c1b-8090-4f51-9a93-46a36ff28baa" (UID: "65f64c1b-8090-4f51-9a93-46a36ff28baa"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.898394 4910 scope.go:117] "RemoveContainer" containerID="e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840" Nov 25 21:48:15 crc kubenswrapper[4910]: E1125 21:48:15.899147 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840\": container with ID starting with e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840 not found: ID does not exist" containerID="e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.899188 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840"} err="failed to get container status \"e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840\": rpc error: code = NotFound desc = could not find container \"e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840\": container with ID starting with e342f6934c0fd32b7812e61b5a9859fdee1cd2d3dcdba80081663731eafb6840 not found: ID does not exist" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.899217 4910 scope.go:117] "RemoveContainer" containerID="d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.900061 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.900168 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/65f64c1b-8090-4f51-9a93-46a36ff28baa-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:15 crc kubenswrapper[4910]: E1125 21:48:15.905621 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd\": container with ID starting with d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd not found: ID does not exist" containerID="d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.905674 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd"} err="failed to get container status \"d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd\": rpc error: code = NotFound desc = could not find container \"d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd\": container with ID starting with d7fa58580aab3a327867da73aa97cf4d66f6ffeba4c879755b8c2f96ca548cbd not found: ID does not exist" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.905711 4910 scope.go:117] "RemoveContainer" containerID="d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf" Nov 25 21:48:15 crc kubenswrapper[4910]: E1125 21:48:15.909947 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf\": container with ID starting with 
d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf not found: ID does not exist" containerID="d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.909991 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf"} err="failed to get container status \"d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf\": rpc error: code = NotFound desc = could not find container \"d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf\": container with ID starting with d0a2c8a6aee8c323f400c7f7d49ea1d181e36f3154256d02c3e70012dbbf9fdf not found: ID does not exist" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.910015 4910 scope.go:117] "RemoveContainer" containerID="2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6" Nov 25 21:48:15 crc kubenswrapper[4910]: I1125 21:48:15.966885 4910 scope.go:117] "RemoveContainer" containerID="bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.104461 4910 scope.go:117] "RemoveContainer" containerID="2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.107178 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:48:16 crc kubenswrapper[4910]: E1125 21:48:16.124444 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6\": container with ID starting with 2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6 not found: ID does not exist" containerID="2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.124501 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6"} err="failed to get container status \"2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6\": rpc error: code = NotFound desc = could not find container \"2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6\": container with ID starting with 2e265d2a43e31e370a291d58d645138a7eb715c3867e057f50c4c4cd545b97b6 not found: ID does not exist" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.124535 4910 scope.go:117] "RemoveContainer" containerID="bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.126646 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:48:16 crc kubenswrapper[4910]: E1125 21:48:16.129855 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf\": container with ID starting with bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf not found: ID does not exist" containerID="bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.129906 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf"} err="failed to get 
container status \"bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf\": rpc error: code = NotFound desc = could not find container \"bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf\": container with ID starting with bd80587f409eb17c143d9957c95f25c561acc7e94a8a3bd6e2fdc37e257fddcf not found: ID does not exist" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.138963 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:48:16 crc kubenswrapper[4910]: E1125 21:48:16.139598 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="sg-core" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.139611 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="sg-core" Nov 25 21:48:16 crc kubenswrapper[4910]: E1125 21:48:16.139621 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="proxy-httpd" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.139629 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="proxy-httpd" Nov 25 21:48:16 crc kubenswrapper[4910]: E1125 21:48:16.139645 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12008eb5-3863-4f4d-af56-1f10d685ae3d" containerName="init" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.139652 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="12008eb5-3863-4f4d-af56-1f10d685ae3d" containerName="init" Nov 25 21:48:16 crc kubenswrapper[4910]: E1125 21:48:16.139673 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="ceilometer-notification-agent" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.139680 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="ceilometer-notification-agent" Nov 25 21:48:16 crc kubenswrapper[4910]: E1125 21:48:16.139691 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12008eb5-3863-4f4d-af56-1f10d685ae3d" containerName="dnsmasq-dns" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.139697 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="12008eb5-3863-4f4d-af56-1f10d685ae3d" containerName="dnsmasq-dns" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.139892 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="ceilometer-notification-agent" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.139909 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="12008eb5-3863-4f4d-af56-1f10d685ae3d" containerName="dnsmasq-dns" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.139928 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="proxy-httpd" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.139939 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" containerName="sg-core" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.141899 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.147112 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.147130 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.160867 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.308765 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-scripts\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.308853 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-config-data\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.308891 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-run-httpd\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.309802 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.309869 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nlx6\" (UniqueName: \"kubernetes.io/projected/5810cdb3-08d2-433b-b43e-ae16a13f108c-kube-api-access-5nlx6\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.309959 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-log-httpd\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.310022 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.411950 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-scripts\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.412048 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-config-data\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.412085 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-run-httpd\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.412109 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.412137 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nlx6\" (UniqueName: \"kubernetes.io/projected/5810cdb3-08d2-433b-b43e-ae16a13f108c-kube-api-access-5nlx6\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.412188 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-log-httpd\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.412228 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.413402 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-run-httpd\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.413868 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-log-httpd\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.420087 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.420750 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-scripts\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.420784 4910 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-config-data\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.438968 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nlx6\" (UniqueName: \"kubernetes.io/projected/5810cdb3-08d2-433b-b43e-ae16a13f108c-kube-api-access-5nlx6\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.458235 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") " pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.513789 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.740422 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2484eb65-ca04-42eb-a5fc-fed7998fc762","Type":"ContainerStarted","Data":"f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1"} Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.741101 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="2484eb65-ca04-42eb-a5fc-fed7998fc762" containerName="cinder-api-log" containerID="cri-o://5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b" gracePeriod=30 Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.741785 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.742127 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="2484eb65-ca04-42eb-a5fc-fed7998fc762" containerName="cinder-api" containerID="cri-o://f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1" gracePeriod=30 Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.780597 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"247c2c17-ca52-421a-b739-7926362deff2","Type":"ContainerStarted","Data":"8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a"} Nov 25 21:48:16 crc kubenswrapper[4910]: I1125 21:48:16.788525 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.788496939 podStartE2EDuration="3.788496939s" podCreationTimestamp="2025-11-25 21:48:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:16.761311967 +0000 UTC m=+1052.223788289" watchObservedRunningTime="2025-11-25 21:48:16.788496939 +0000 UTC m=+1052.250973251" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.120155 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.121771 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.229509 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="12008eb5-3863-4f4d-af56-1f10d685ae3d" path="/var/lib/kubelet/pods/12008eb5-3863-4f4d-af56-1f10d685ae3d/volumes" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.230577 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="65f64c1b-8090-4f51-9a93-46a36ff28baa" path="/var/lib/kubelet/pods/65f64c1b-8090-4f51-9a93-46a36ff28baa/volumes" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.498519 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.648104 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data-custom\") pod \"2484eb65-ca04-42eb-a5fc-fed7998fc762\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.648192 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2484eb65-ca04-42eb-a5fc-fed7998fc762-etc-machine-id\") pod \"2484eb65-ca04-42eb-a5fc-fed7998fc762\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.648266 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data\") pod \"2484eb65-ca04-42eb-a5fc-fed7998fc762\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.648287 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-scripts\") pod \"2484eb65-ca04-42eb-a5fc-fed7998fc762\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.648333 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-combined-ca-bundle\") pod \"2484eb65-ca04-42eb-a5fc-fed7998fc762\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.648375 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2wpx\" (UniqueName: \"kubernetes.io/projected/2484eb65-ca04-42eb-a5fc-fed7998fc762-kube-api-access-n2wpx\") pod \"2484eb65-ca04-42eb-a5fc-fed7998fc762\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.648502 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2484eb65-ca04-42eb-a5fc-fed7998fc762-logs\") pod \"2484eb65-ca04-42eb-a5fc-fed7998fc762\" (UID: \"2484eb65-ca04-42eb-a5fc-fed7998fc762\") " Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.650755 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2484eb65-ca04-42eb-a5fc-fed7998fc762-logs" (OuterVolumeSpecName: "logs") pod "2484eb65-ca04-42eb-a5fc-fed7998fc762" (UID: "2484eb65-ca04-42eb-a5fc-fed7998fc762"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.652335 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2484eb65-ca04-42eb-a5fc-fed7998fc762-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "2484eb65-ca04-42eb-a5fc-fed7998fc762" (UID: "2484eb65-ca04-42eb-a5fc-fed7998fc762"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.663724 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-scripts" (OuterVolumeSpecName: "scripts") pod "2484eb65-ca04-42eb-a5fc-fed7998fc762" (UID: "2484eb65-ca04-42eb-a5fc-fed7998fc762"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.664345 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2484eb65-ca04-42eb-a5fc-fed7998fc762-kube-api-access-n2wpx" (OuterVolumeSpecName: "kube-api-access-n2wpx") pod "2484eb65-ca04-42eb-a5fc-fed7998fc762" (UID: "2484eb65-ca04-42eb-a5fc-fed7998fc762"). InnerVolumeSpecName "kube-api-access-n2wpx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.673869 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2484eb65-ca04-42eb-a5fc-fed7998fc762" (UID: "2484eb65-ca04-42eb-a5fc-fed7998fc762"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.738497 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2484eb65-ca04-42eb-a5fc-fed7998fc762" (UID: "2484eb65-ca04-42eb-a5fc-fed7998fc762"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.756510 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.756549 4910 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/2484eb65-ca04-42eb-a5fc-fed7998fc762-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.756579 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.756588 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.756598 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2wpx\" (UniqueName: \"kubernetes.io/projected/2484eb65-ca04-42eb-a5fc-fed7998fc762-kube-api-access-n2wpx\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.756609 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2484eb65-ca04-42eb-a5fc-fed7998fc762-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.841653 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"247c2c17-ca52-421a-b739-7926362deff2","Type":"ContainerStarted","Data":"792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3"} Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.873495 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5810cdb3-08d2-433b-b43e-ae16a13f108c","Type":"ContainerStarted","Data":"089c8b7821e29a52a7d285918936f7c5f071eb7f1bdda530378bd849c3ca9dfe"} Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.875927 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.674248207 podStartE2EDuration="5.8759036s" podCreationTimestamp="2025-11-25 21:48:12 +0000 UTC" firstStartedPulling="2025-11-25 21:48:13.740929889 +0000 UTC m=+1049.203406201" lastFinishedPulling="2025-11-25 21:48:14.942585272 +0000 UTC m=+1050.405061594" observedRunningTime="2025-11-25 21:48:17.873460515 +0000 UTC m=+1053.335936837" watchObservedRunningTime="2025-11-25 21:48:17.8759036 +0000 UTC m=+1053.338379922" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.891100 4910 generic.go:334] "Generic (PLEG): container finished" podID="2484eb65-ca04-42eb-a5fc-fed7998fc762" containerID="f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1" exitCode=0 Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.891386 4910 generic.go:334] "Generic (PLEG): container finished" podID="2484eb65-ca04-42eb-a5fc-fed7998fc762" containerID="5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b" exitCode=143 Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.891451 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data" (OuterVolumeSpecName: "config-data") pod "2484eb65-ca04-42eb-a5fc-fed7998fc762" (UID: "2484eb65-ca04-42eb-a5fc-fed7998fc762"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.891594 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.891694 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2484eb65-ca04-42eb-a5fc-fed7998fc762","Type":"ContainerDied","Data":"f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1"} Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.891799 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2484eb65-ca04-42eb-a5fc-fed7998fc762","Type":"ContainerDied","Data":"5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b"} Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.891884 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"2484eb65-ca04-42eb-a5fc-fed7998fc762","Type":"ContainerDied","Data":"c2ecf1a52722495a3179712ed48f766bbff20a4146958b2d5cdfa77bca064063"} Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.891971 4910 scope.go:117] "RemoveContainer" containerID="f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.949348 4910 scope.go:117] "RemoveContainer" containerID="5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.959589 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.965708 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2484eb65-ca04-42eb-a5fc-fed7998fc762-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.968308 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.975360 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 25 21:48:17 crc kubenswrapper[4910]: E1125 21:48:17.975838 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2484eb65-ca04-42eb-a5fc-fed7998fc762" containerName="cinder-api" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.975903 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2484eb65-ca04-42eb-a5fc-fed7998fc762" containerName="cinder-api" Nov 25 21:48:17 crc kubenswrapper[4910]: E1125 21:48:17.975989 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2484eb65-ca04-42eb-a5fc-fed7998fc762" containerName="cinder-api-log" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.976062 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2484eb65-ca04-42eb-a5fc-fed7998fc762" containerName="cinder-api-log" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.976312 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2484eb65-ca04-42eb-a5fc-fed7998fc762" containerName="cinder-api-log" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.976403 4910 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="2484eb65-ca04-42eb-a5fc-fed7998fc762" containerName="cinder-api" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.977723 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.982490 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.982550 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 25 21:48:17 crc kubenswrapper[4910]: I1125 21:48:17.982784 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.008519 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.038142 4910 scope.go:117] "RemoveContainer" containerID="f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1" Nov 25 21:48:18 crc kubenswrapper[4910]: E1125 21:48:18.047436 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1\": container with ID starting with f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1 not found: ID does not exist" containerID="f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.047477 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1"} err="failed to get container status \"f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1\": rpc error: code = NotFound desc = could not find container \"f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1\": container with ID starting with f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1 not found: ID does not exist" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.047500 4910 scope.go:117] "RemoveContainer" containerID="5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b" Nov 25 21:48:18 crc kubenswrapper[4910]: E1125 21:48:18.047849 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b\": container with ID starting with 5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b not found: ID does not exist" containerID="5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.047896 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b"} err="failed to get container status \"5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b\": rpc error: code = NotFound desc = could not find container \"5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b\": container with ID starting with 5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b not found: ID does not exist" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.047925 4910 scope.go:117] "RemoveContainer" containerID="f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1" Nov 25 
21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.048594 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1"} err="failed to get container status \"f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1\": rpc error: code = NotFound desc = could not find container \"f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1\": container with ID starting with f5b6af6013e385ff3a971dc8a86efef7a4a8c2feb26f958ddc37831e4b197af1 not found: ID does not exist" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.048623 4910 scope.go:117] "RemoveContainer" containerID="5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.049017 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b"} err="failed to get container status \"5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b\": rpc error: code = NotFound desc = could not find container \"5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b\": container with ID starting with 5e3f70ecf636065bd7ff45ce2bff35a9dc32fbd9dd021c67f201b3327fbacd3b not found: ID does not exist" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.069913 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-config-data-custom\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.069997 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-public-tls-certs\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.070040 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24fe78bf-5d43-4896-b226-8d33a8856a13-logs\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.070078 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24fe78bf-5d43-4896-b226-8d33a8856a13-etc-machine-id\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.070113 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.070149 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-scripts\") pod \"cinder-api-0\" (UID: 
\"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.070191 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-config-data\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.070258 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mv7b9\" (UniqueName: \"kubernetes.io/projected/24fe78bf-5d43-4896-b226-8d33a8856a13-kube-api-access-mv7b9\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.070273 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.171864 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-public-tls-certs\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.171927 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24fe78bf-5d43-4896-b226-8d33a8856a13-etc-machine-id\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.171947 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24fe78bf-5d43-4896-b226-8d33a8856a13-logs\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.171981 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.171998 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-scripts\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.172040 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-config-data\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.172072 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mv7b9\" (UniqueName: 
\"kubernetes.io/projected/24fe78bf-5d43-4896-b226-8d33a8856a13-kube-api-access-mv7b9\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.172087 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.172127 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-config-data-custom\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.172380 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/24fe78bf-5d43-4896-b226-8d33a8856a13-etc-machine-id\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.176862 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/24fe78bf-5d43-4896-b226-8d33a8856a13-logs\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.181654 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.183544 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-config-data-custom\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.185667 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-scripts\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.186858 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.188100 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-public-tls-certs\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.192356 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-config-data\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 
21:48:18.193501 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/24fe78bf-5d43-4896-b226-8d33a8856a13-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.202292 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mv7b9\" (UniqueName: \"kubernetes.io/projected/24fe78bf-5d43-4896-b226-8d33a8856a13-kube-api-access-mv7b9\") pod \"cinder-api-0\" (UID: \"24fe78bf-5d43-4896-b226-8d33a8856a13\") " pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.337086 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.626333 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-58f8d7cc56-csk7l" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.794753 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.892955 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-657976db8d-swkbt" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.929343 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-scripts\") pod \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.929723 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-config-data\") pod \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.929952 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a49af42e-3a80-4f2a-9b4b-f43946a32c49-horizon-secret-key\") pod \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.929988 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6r8dw\" (UniqueName: \"kubernetes.io/projected/a49af42e-3a80-4f2a-9b4b-f43946a32c49-kube-api-access-6r8dw\") pod \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.930048 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a49af42e-3a80-4f2a-9b4b-f43946a32c49-logs\") pod \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\" (UID: \"a49af42e-3a80-4f2a-9b4b-f43946a32c49\") " Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.935033 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a49af42e-3a80-4f2a-9b4b-f43946a32c49-logs" (OuterVolumeSpecName: "logs") pod "a49af42e-3a80-4f2a-9b4b-f43946a32c49" (UID: "a49af42e-3a80-4f2a-9b4b-f43946a32c49"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.937661 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a49af42e-3a80-4f2a-9b4b-f43946a32c49-kube-api-access-6r8dw" (OuterVolumeSpecName: "kube-api-access-6r8dw") pod "a49af42e-3a80-4f2a-9b4b-f43946a32c49" (UID: "a49af42e-3a80-4f2a-9b4b-f43946a32c49"). InnerVolumeSpecName "kube-api-access-6r8dw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.960217 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a49af42e-3a80-4f2a-9b4b-f43946a32c49-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "a49af42e-3a80-4f2a-9b4b-f43946a32c49" (UID: "a49af42e-3a80-4f2a-9b4b-f43946a32c49"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.960912 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-scripts" (OuterVolumeSpecName: "scripts") pod "a49af42e-3a80-4f2a-9b4b-f43946a32c49" (UID: "a49af42e-3a80-4f2a-9b4b-f43946a32c49"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.961456 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6r8dw\" (UniqueName: \"kubernetes.io/projected/a49af42e-3a80-4f2a-9b4b-f43946a32c49-kube-api-access-6r8dw\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:18 crc kubenswrapper[4910]: I1125 21:48:18.961488 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a49af42e-3a80-4f2a-9b4b-f43946a32c49-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.019160 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-config-data" (OuterVolumeSpecName: "config-data") pod "a49af42e-3a80-4f2a-9b4b-f43946a32c49" (UID: "a49af42e-3a80-4f2a-9b4b-f43946a32c49"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.065712 4910 generic.go:334] "Generic (PLEG): container finished" podID="a49af42e-3a80-4f2a-9b4b-f43946a32c49" containerID="ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c" exitCode=137 Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.065748 4910 generic.go:334] "Generic (PLEG): container finished" podID="a49af42e-3a80-4f2a-9b4b-f43946a32c49" containerID="b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940" exitCode=137 Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.065801 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79b94b5d95-k97q6" event={"ID":"a49af42e-3a80-4f2a-9b4b-f43946a32c49","Type":"ContainerDied","Data":"ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c"} Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.065833 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79b94b5d95-k97q6" event={"ID":"a49af42e-3a80-4f2a-9b4b-f43946a32c49","Type":"ContainerDied","Data":"b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940"} Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.065844 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-79b94b5d95-k97q6" event={"ID":"a49af42e-3a80-4f2a-9b4b-f43946a32c49","Type":"ContainerDied","Data":"dbee99a9efca638025b0159da0cf4330d32f82a9b0d671f132609b3c69329022"} Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.065860 4910 scope.go:117] "RemoveContainer" containerID="ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.066077 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-79b94b5d95-k97q6" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.067621 4910 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/a49af42e-3a80-4f2a-9b4b-f43946a32c49-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.067641 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.067651 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/a49af42e-3a80-4f2a-9b4b-f43946a32c49-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.068934 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.078349 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5810cdb3-08d2-433b-b43e-ae16a13f108c","Type":"ContainerStarted","Data":"122cff3a8c7d363474cb5a00f4df0302716e6a8a476575acc5474e6e1e7d5588"} Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.142296 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-79b94b5d95-k97q6"] Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.147276 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-79b94b5d95-k97q6"] Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.221892 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2484eb65-ca04-42eb-a5fc-fed7998fc762" path="/var/lib/kubelet/pods/2484eb65-ca04-42eb-a5fc-fed7998fc762/volumes" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.222756 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a49af42e-3a80-4f2a-9b4b-f43946a32c49" path="/var/lib/kubelet/pods/a49af42e-3a80-4f2a-9b4b-f43946a32c49/volumes" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.298140 4910 scope.go:117] "RemoveContainer" containerID="b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.342973 4910 scope.go:117] "RemoveContainer" containerID="ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c" Nov 25 21:48:19 crc kubenswrapper[4910]: E1125 21:48:19.344203 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c\": container with ID starting with ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c not found: ID does not exist" containerID="ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.344288 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c"} err="failed to get container status \"ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c\": rpc error: code = NotFound desc = could not find container \"ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c\": container with ID starting with ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c not found: ID does not exist" Nov 25 21:48:19 
crc kubenswrapper[4910]: I1125 21:48:19.344316 4910 scope.go:117] "RemoveContainer" containerID="b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940" Nov 25 21:48:19 crc kubenswrapper[4910]: E1125 21:48:19.351649 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940\": container with ID starting with b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940 not found: ID does not exist" containerID="b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.351715 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940"} err="failed to get container status \"b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940\": rpc error: code = NotFound desc = could not find container \"b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940\": container with ID starting with b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940 not found: ID does not exist" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.351757 4910 scope.go:117] "RemoveContainer" containerID="ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.352169 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c"} err="failed to get container status \"ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c\": rpc error: code = NotFound desc = could not find container \"ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c\": container with ID starting with ffb325fc582792407b8adeef029b9395826061efa75478e616856de74345378c not found: ID does not exist" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.352214 4910 scope.go:117] "RemoveContainer" containerID="b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.353587 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940"} err="failed to get container status \"b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940\": rpc error: code = NotFound desc = could not find container \"b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940\": container with ID starting with b31445eaa9cfe002d41ea46f1192472caa9c02cd818c8562191888e3f7e07940 not found: ID does not exist" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.724722 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-667bcb4bc9-bl288" Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.829925 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6b94985954-h6p9h"] Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.830170 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6b94985954-h6p9h" podUID="ce75c296-74f5-4f94-bcdf-58bb1d44f445" containerName="neutron-api" containerID="cri-o://2831751b92e100fb850ae3355d63ca71ab3c40b2f6e467623d701b84f49da36e" gracePeriod=30 Nov 25 21:48:19 crc kubenswrapper[4910]: I1125 21:48:19.830387 4910 
Nov 25 21:48:20 crc kubenswrapper[4910]: I1125 21:48:20.109641 4910 generic.go:334] "Generic (PLEG): container finished" podID="ce75c296-74f5-4f94-bcdf-58bb1d44f445" containerID="83a4db89007a69a8fb58c5a4a438dd5ae705dd0757f3686acaede6daf5df836c" exitCode=0
Nov 25 21:48:20 crc kubenswrapper[4910]: I1125 21:48:20.109797 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b94985954-h6p9h" event={"ID":"ce75c296-74f5-4f94-bcdf-58bb1d44f445","Type":"ContainerDied","Data":"83a4db89007a69a8fb58c5a4a438dd5ae705dd0757f3686acaede6daf5df836c"}
Nov 25 21:48:20 crc kubenswrapper[4910]: I1125 21:48:20.123709 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5810cdb3-08d2-433b-b43e-ae16a13f108c","Type":"ContainerStarted","Data":"633551ab5b12cf043c15a2de1981631a863ed3792a2a8b8cd89951c8a8c8fc9e"}
Nov 25 21:48:20 crc kubenswrapper[4910]: I1125 21:48:20.130258 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"24fe78bf-5d43-4896-b226-8d33a8856a13","Type":"ContainerStarted","Data":"05b5f1d08ccd906b4c4ff5e7704f4aa627541dbf371179e07ead9b5e9283c884"}
Nov 25 21:48:21 crc kubenswrapper[4910]: I1125 21:48:21.155451 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5810cdb3-08d2-433b-b43e-ae16a13f108c","Type":"ContainerStarted","Data":"c03ac0d89a8ae2ab92700224308d02b40b3a1add69c76e5af89f688958bd7d92"}
Nov 25 21:48:21 crc kubenswrapper[4910]: I1125 21:48:21.156839 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"24fe78bf-5d43-4896-b226-8d33a8856a13","Type":"ContainerStarted","Data":"826d264fea8186740912878fb952a83cfa2488bbf8c39f1a92ad8ad7de32744d"}
Nov 25 21:48:21 crc kubenswrapper[4910]: I1125 21:48:21.688483 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-58f8d7cc56-csk7l"
Nov 25 21:48:21 crc kubenswrapper[4910]: I1125 21:48:21.894535 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7c8449c4cd-d4lrg"
Nov 25 21:48:22 crc kubenswrapper[4910]: I1125 21:48:22.214649 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5810cdb3-08d2-433b-b43e-ae16a13f108c","Type":"ContainerStarted","Data":"b83067cba9f0e3007f91dc6c33e8b3c46de73ab23b9538b0e594d8dcef535135"}
Nov 25 21:48:22 crc kubenswrapper[4910]: I1125 21:48:22.215399 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 21:48:22 crc kubenswrapper[4910]: I1125 21:48:22.237007 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"24fe78bf-5d43-4896-b226-8d33a8856a13","Type":"ContainerStarted","Data":"a74e2aa7d366a7b34336b722ce83013b17746225f04aed47b520fa43cefcb806"}
Nov 25 21:48:22 crc kubenswrapper[4910]: I1125 21:48:22.238419 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Nov 25 21:48:22 crc kubenswrapper[4910]: I1125 21:48:22.248084 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.768652065 podStartE2EDuration="6.248070014s" podCreationTimestamp="2025-11-25 21:48:16 +0000 UTC" firstStartedPulling="2025-11-25 21:48:17.14168394 +0000 UTC m=+1052.604160262" lastFinishedPulling="2025-11-25 21:48:21.621101899 +0000 UTC m=+1057.083578211" observedRunningTime="2025-11-25 21:48:22.242785474 +0000 UTC m=+1057.705261796" watchObservedRunningTime="2025-11-25 21:48:22.248070014 +0000 UTC m=+1057.710546336"
Nov 25 21:48:22 crc kubenswrapper[4910]: I1125 21:48:22.266715 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=5.266693888 podStartE2EDuration="5.266693888s" podCreationTimestamp="2025-11-25 21:48:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:22.266566985 +0000 UTC m=+1057.729043307" watchObservedRunningTime="2025-11-25 21:48:22.266693888 +0000 UTC m=+1057.729170210"
Nov 25 21:48:22 crc kubenswrapper[4910]: I1125 21:48:22.386497 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7c8449c4cd-d4lrg"
Nov 25 21:48:22 crc kubenswrapper[4910]: I1125 21:48:22.392570 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-657976db8d-swkbt"
Nov 25 21:48:22 crc kubenswrapper[4910]: I1125 21:48:22.536514 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-58f8d7cc56-csk7l"]
Nov 25 21:48:22 crc kubenswrapper[4910]: I1125 21:48:22.536768 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-58f8d7cc56-csk7l" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon-log" containerID="cri-o://3d1388e3f9936a5e6838117a86dbadfac66d243bb6179a027f0f0a595cdb55ae" gracePeriod=30
Nov 25 21:48:22 crc kubenswrapper[4910]: I1125 21:48:22.536886 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-58f8d7cc56-csk7l" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon" containerID="cri-o://3a6d55cf981774e9e32bdc5ee3a9fbb53ace068261c91c2d651a817d4ca4dc1f" gracePeriod=30
Nov 25 21:48:23 crc kubenswrapper[4910]: I1125 21:48:23.099376 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 21:48:23 crc kubenswrapper[4910]: I1125 21:48:23.099456 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 21:48:23 crc kubenswrapper[4910]: I1125 21:48:23.329480 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb"
Nov 25 21:48:23 crc kubenswrapper[4910]: I1125 21:48:23.421298 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-52tbh"]
Nov 25 21:48:23 crc kubenswrapper[4910]: I1125 21:48:23.421623 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" podUID="9ad0d21f-7e9e-45a9-9c13-ff948f290e77" containerName="dnsmasq-dns" containerID="cri-o://3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4" gracePeriod=10
Nov 25 21:48:23 crc kubenswrapper[4910]: I1125 21:48:23.625599 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 25 21:48:23 crc kubenswrapper[4910]: I1125 21:48:23.679908 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.079977 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh"
Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.193575 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-config\") pod \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") "
Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.193670 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-nb\") pod \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") "
Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.193720 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-swift-storage-0\") pod \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") "
Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.193839 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-svc\") pod \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") "
Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.193946 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-sb\") pod \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") "
Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.194016 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bv96x\" (UniqueName: \"kubernetes.io/projected/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-kube-api-access-bv96x\") pod \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\" (UID: \"9ad0d21f-7e9e-45a9-9c13-ff948f290e77\") "
Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.241587 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-kube-api-access-bv96x" (OuterVolumeSpecName: "kube-api-access-bv96x") pod "9ad0d21f-7e9e-45a9-9c13-ff948f290e77" (UID: "9ad0d21f-7e9e-45a9-9c13-ff948f290e77"). InnerVolumeSpecName "kube-api-access-bv96x".
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.275649 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9ad0d21f-7e9e-45a9-9c13-ff948f290e77" (UID: "9ad0d21f-7e9e-45a9-9c13-ff948f290e77"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.296870 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bv96x\" (UniqueName: \"kubernetes.io/projected/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-kube-api-access-bv96x\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.296905 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.309891 4910 generic.go:334] "Generic (PLEG): container finished" podID="9ad0d21f-7e9e-45a9-9c13-ff948f290e77" containerID="3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4" exitCode=0 Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.309966 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" event={"ID":"9ad0d21f-7e9e-45a9-9c13-ff948f290e77","Type":"ContainerDied","Data":"3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4"} Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.310000 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" event={"ID":"9ad0d21f-7e9e-45a9-9c13-ff948f290e77","Type":"ContainerDied","Data":"2b102cf60990d5bba73bf536f8fbbe4a47b1a3c8f3c4adbf5493948c74d12f0a"} Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.310021 4910 scope.go:117] "RemoveContainer" containerID="3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.310144 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc5c4795-52tbh" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.324911 4910 generic.go:334] "Generic (PLEG): container finished" podID="ce75c296-74f5-4f94-bcdf-58bb1d44f445" containerID="2831751b92e100fb850ae3355d63ca71ab3c40b2f6e467623d701b84f49da36e" exitCode=0 Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.325439 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9ad0d21f-7e9e-45a9-9c13-ff948f290e77" (UID: "9ad0d21f-7e9e-45a9-9c13-ff948f290e77"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.325645 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="247c2c17-ca52-421a-b739-7926362deff2" containerName="cinder-scheduler" containerID="cri-o://8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a" gracePeriod=30 Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.326343 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="247c2c17-ca52-421a-b739-7926362deff2" containerName="probe" containerID="cri-o://792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3" gracePeriod=30 Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.326482 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b94985954-h6p9h" event={"ID":"ce75c296-74f5-4f94-bcdf-58bb1d44f445","Type":"ContainerDied","Data":"2831751b92e100fb850ae3355d63ca71ab3c40b2f6e467623d701b84f49da36e"} Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.363354 4910 scope.go:117] "RemoveContainer" containerID="fef354f192599f289de05b42efa804f3df74fab9c02e4f5944dee1a6061c6448" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.388231 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9ad0d21f-7e9e-45a9-9c13-ff948f290e77" (UID: "9ad0d21f-7e9e-45a9-9c13-ff948f290e77"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.392446 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-config" (OuterVolumeSpecName: "config") pod "9ad0d21f-7e9e-45a9-9c13-ff948f290e77" (UID: "9ad0d21f-7e9e-45a9-9c13-ff948f290e77"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.398862 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.398890 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.398902 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.399076 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9ad0d21f-7e9e-45a9-9c13-ff948f290e77" (UID: "9ad0d21f-7e9e-45a9-9c13-ff948f290e77"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.411449 4910 scope.go:117] "RemoveContainer" containerID="3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4" Nov 25 21:48:24 crc kubenswrapper[4910]: E1125 21:48:24.418174 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4\": container with ID starting with 3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4 not found: ID does not exist" containerID="3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.418231 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4"} err="failed to get container status \"3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4\": rpc error: code = NotFound desc = could not find container \"3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4\": container with ID starting with 3155f37560c10c34d0c4f774932c19a36e6fcf08e2b5d8a5f7beee87db578fc4 not found: ID does not exist" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.418281 4910 scope.go:117] "RemoveContainer" containerID="fef354f192599f289de05b42efa804f3df74fab9c02e4f5944dee1a6061c6448" Nov 25 21:48:24 crc kubenswrapper[4910]: E1125 21:48:24.426217 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fef354f192599f289de05b42efa804f3df74fab9c02e4f5944dee1a6061c6448\": container with ID starting with fef354f192599f289de05b42efa804f3df74fab9c02e4f5944dee1a6061c6448 not found: ID does not exist" containerID="fef354f192599f289de05b42efa804f3df74fab9c02e4f5944dee1a6061c6448" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.426281 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fef354f192599f289de05b42efa804f3df74fab9c02e4f5944dee1a6061c6448"} err="failed to get container status \"fef354f192599f289de05b42efa804f3df74fab9c02e4f5944dee1a6061c6448\": rpc error: code = NotFound desc = could not find container \"fef354f192599f289de05b42efa804f3df74fab9c02e4f5944dee1a6061c6448\": container with ID starting with fef354f192599f289de05b42efa804f3df74fab9c02e4f5944dee1a6061c6448 not found: ID does not exist" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.501186 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ad0d21f-7e9e-45a9-9c13-ff948f290e77-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.543015 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.666490 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-52tbh"] Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.668169 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.674585 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc5c4795-52tbh"] Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.714773 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-config\") pod \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.714886 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-ovndb-tls-certs\") pod \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.714956 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54sjq\" (UniqueName: \"kubernetes.io/projected/ce75c296-74f5-4f94-bcdf-58bb1d44f445-kube-api-access-54sjq\") pod \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.715160 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-combined-ca-bundle\") pod \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.715209 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-httpd-config\") pod \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\" (UID: \"ce75c296-74f5-4f94-bcdf-58bb1d44f445\") " Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.720350 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "ce75c296-74f5-4f94-bcdf-58bb1d44f445" (UID: "ce75c296-74f5-4f94-bcdf-58bb1d44f445"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.730099 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ce75c296-74f5-4f94-bcdf-58bb1d44f445-kube-api-access-54sjq" (OuterVolumeSpecName: "kube-api-access-54sjq") pod "ce75c296-74f5-4f94-bcdf-58bb1d44f445" (UID: "ce75c296-74f5-4f94-bcdf-58bb1d44f445"). InnerVolumeSpecName "kube-api-access-54sjq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.764188 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-config" (OuterVolumeSpecName: "config") pod "ce75c296-74f5-4f94-bcdf-58bb1d44f445" (UID: "ce75c296-74f5-4f94-bcdf-58bb1d44f445"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.806334 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "ce75c296-74f5-4f94-bcdf-58bb1d44f445" (UID: "ce75c296-74f5-4f94-bcdf-58bb1d44f445"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.822735 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.822777 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.822804 4910 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.822823 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54sjq\" (UniqueName: \"kubernetes.io/projected/ce75c296-74f5-4f94-bcdf-58bb1d44f445-kube-api-access-54sjq\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.870358 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ce75c296-74f5-4f94-bcdf-58bb1d44f445" (UID: "ce75c296-74f5-4f94-bcdf-58bb1d44f445"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.914611 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-7ccccf649d-9sm5c" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.924752 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ce75c296-74f5-4f94-bcdf-58bb1d44f445-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.977305 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7c8449c4cd-d4lrg"] Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.977599 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7c8449c4cd-d4lrg" podUID="b1036586-470f-4688-a73b-b2849eae1c02" containerName="barbican-api-log" containerID="cri-o://fe52d772bd4be9a08daa027563de6a2534c4b03941b5704d73529cf17d9c3225" gracePeriod=30 Nov 25 21:48:24 crc kubenswrapper[4910]: I1125 21:48:24.977748 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-7c8449c4cd-d4lrg" podUID="b1036586-470f-4688-a73b-b2849eae1c02" containerName="barbican-api" containerID="cri-o://54a44786b30f9faa0d22af189569a75983882027794fadd099f9b3bcd5cfeeda" gracePeriod=30 Nov 25 21:48:25 crc kubenswrapper[4910]: I1125 21:48:25.222049 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ad0d21f-7e9e-45a9-9c13-ff948f290e77" path="/var/lib/kubelet/pods/9ad0d21f-7e9e-45a9-9c13-ff948f290e77/volumes" Nov 25 21:48:25 crc kubenswrapper[4910]: I1125 21:48:25.345169 4910 generic.go:334] "Generic (PLEG): container finished" podID="b1036586-470f-4688-a73b-b2849eae1c02" containerID="fe52d772bd4be9a08daa027563de6a2534c4b03941b5704d73529cf17d9c3225" exitCode=143 Nov 25 21:48:25 crc kubenswrapper[4910]: I1125 21:48:25.345256 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c8449c4cd-d4lrg" event={"ID":"b1036586-470f-4688-a73b-b2849eae1c02","Type":"ContainerDied","Data":"fe52d772bd4be9a08daa027563de6a2534c4b03941b5704d73529cf17d9c3225"} Nov 25 21:48:25 crc kubenswrapper[4910]: I1125 21:48:25.348226 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6b94985954-h6p9h" Nov 25 21:48:25 crc kubenswrapper[4910]: I1125 21:48:25.348771 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b94985954-h6p9h" event={"ID":"ce75c296-74f5-4f94-bcdf-58bb1d44f445","Type":"ContainerDied","Data":"7dffe93d7de1a059353097a174f448e5d599030aa297b44403aad8b9fa8373d1"} Nov 25 21:48:25 crc kubenswrapper[4910]: I1125 21:48:25.348802 4910 scope.go:117] "RemoveContainer" containerID="83a4db89007a69a8fb58c5a4a438dd5ae705dd0757f3686acaede6daf5df836c" Nov 25 21:48:25 crc kubenswrapper[4910]: I1125 21:48:25.383936 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6b94985954-h6p9h"] Nov 25 21:48:25 crc kubenswrapper[4910]: I1125 21:48:25.385453 4910 scope.go:117] "RemoveContainer" containerID="2831751b92e100fb850ae3355d63ca71ab3c40b2f6e467623d701b84f49da36e" Nov 25 21:48:25 crc kubenswrapper[4910]: I1125 21:48:25.412228 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6b94985954-h6p9h"] Nov 25 21:48:25 crc kubenswrapper[4910]: I1125 21:48:25.680887 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-58f8d7cc56-csk7l" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.147:8443/dashboard/auth/login/?next=/dashboard/\": read tcp 10.217.0.2:46212->10.217.0.147:8443: read: connection reset by peer" Nov 25 21:48:26 crc kubenswrapper[4910]: I1125 21:48:26.363095 4910 generic.go:334] "Generic (PLEG): container finished" podID="247c2c17-ca52-421a-b739-7926362deff2" containerID="792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3" exitCode=0 Nov 25 21:48:26 crc kubenswrapper[4910]: I1125 21:48:26.363225 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"247c2c17-ca52-421a-b739-7926362deff2","Type":"ContainerDied","Data":"792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3"} Nov 25 21:48:26 crc kubenswrapper[4910]: I1125 21:48:26.371267 4910 generic.go:334] "Generic (PLEG): container finished" podID="78dc494b-f987-443a-a350-1988639b6fee" containerID="3a6d55cf981774e9e32bdc5ee3a9fbb53ace068261c91c2d651a817d4ca4dc1f" exitCode=0 Nov 25 21:48:26 crc kubenswrapper[4910]: I1125 21:48:26.371300 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58f8d7cc56-csk7l" event={"ID":"78dc494b-f987-443a-a350-1988639b6fee","Type":"ContainerDied","Data":"3a6d55cf981774e9e32bdc5ee3a9fbb53ace068261c91c2d651a817d4ca4dc1f"} Nov 25 21:48:27 crc kubenswrapper[4910]: I1125 21:48:27.216328 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ce75c296-74f5-4f94-bcdf-58bb1d44f445" path="/var/lib/kubelet/pods/ce75c296-74f5-4f94-bcdf-58bb1d44f445/volumes" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.403074 4910 generic.go:334] "Generic (PLEG): container finished" podID="b1036586-470f-4688-a73b-b2849eae1c02" containerID="54a44786b30f9faa0d22af189569a75983882027794fadd099f9b3bcd5cfeeda" exitCode=0 Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.403316 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c8449c4cd-d4lrg" event={"ID":"b1036586-470f-4688-a73b-b2849eae1c02","Type":"ContainerDied","Data":"54a44786b30f9faa0d22af189569a75983882027794fadd099f9b3bcd5cfeeda"} Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.644679 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.801302 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.825296 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1036586-470f-4688-a73b-b2849eae1c02-logs\") pod \"b1036586-470f-4688-a73b-b2849eae1c02\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.825625 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-44n8b\" (UniqueName: \"kubernetes.io/projected/b1036586-470f-4688-a73b-b2849eae1c02-kube-api-access-44n8b\") pod \"b1036586-470f-4688-a73b-b2849eae1c02\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.825858 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data\") pod \"b1036586-470f-4688-a73b-b2849eae1c02\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.825997 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-combined-ca-bundle\") pod \"b1036586-470f-4688-a73b-b2849eae1c02\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.826128 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data-custom\") pod \"b1036586-470f-4688-a73b-b2849eae1c02\" (UID: \"b1036586-470f-4688-a73b-b2849eae1c02\") " Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.830188 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b1036586-470f-4688-a73b-b2849eae1c02-logs" (OuterVolumeSpecName: "logs") pod "b1036586-470f-4688-a73b-b2849eae1c02" (UID: "b1036586-470f-4688-a73b-b2849eae1c02"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.835765 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b1036586-470f-4688-a73b-b2849eae1c02" (UID: "b1036586-470f-4688-a73b-b2849eae1c02"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.837433 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1036586-470f-4688-a73b-b2849eae1c02-kube-api-access-44n8b" (OuterVolumeSpecName: "kube-api-access-44n8b") pod "b1036586-470f-4688-a73b-b2849eae1c02" (UID: "b1036586-470f-4688-a73b-b2849eae1c02"). InnerVolumeSpecName "kube-api-access-44n8b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.883742 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b1036586-470f-4688-a73b-b2849eae1c02" (UID: "b1036586-470f-4688-a73b-b2849eae1c02"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.896558 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data" (OuterVolumeSpecName: "config-data") pod "b1036586-470f-4688-a73b-b2849eae1c02" (UID: "b1036586-470f-4688-a73b-b2849eae1c02"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.927792 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xmcv\" (UniqueName: \"kubernetes.io/projected/247c2c17-ca52-421a-b739-7926362deff2-kube-api-access-8xmcv\") pod \"247c2c17-ca52-421a-b739-7926362deff2\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.927916 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data\") pod \"247c2c17-ca52-421a-b739-7926362deff2\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.927974 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-combined-ca-bundle\") pod \"247c2c17-ca52-421a-b739-7926362deff2\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.928004 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data-custom\") pod \"247c2c17-ca52-421a-b739-7926362deff2\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.928033 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-scripts\") pod \"247c2c17-ca52-421a-b739-7926362deff2\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.928103 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/247c2c17-ca52-421a-b739-7926362deff2-etc-machine-id\") pod \"247c2c17-ca52-421a-b739-7926362deff2\" (UID: \"247c2c17-ca52-421a-b739-7926362deff2\") " Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.928533 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/247c2c17-ca52-421a-b739-7926362deff2-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "247c2c17-ca52-421a-b739-7926362deff2" (UID: "247c2c17-ca52-421a-b739-7926362deff2"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.928573 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b1036586-470f-4688-a73b-b2849eae1c02-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.928611 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-44n8b\" (UniqueName: \"kubernetes.io/projected/b1036586-470f-4688-a73b-b2849eae1c02-kube-api-access-44n8b\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.928628 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.928639 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.928649 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b1036586-470f-4688-a73b-b2849eae1c02-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.932635 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "247c2c17-ca52-421a-b739-7926362deff2" (UID: "247c2c17-ca52-421a-b739-7926362deff2"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.932703 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-scripts" (OuterVolumeSpecName: "scripts") pod "247c2c17-ca52-421a-b739-7926362deff2" (UID: "247c2c17-ca52-421a-b739-7926362deff2"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.933418 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/247c2c17-ca52-421a-b739-7926362deff2-kube-api-access-8xmcv" (OuterVolumeSpecName: "kube-api-access-8xmcv") pod "247c2c17-ca52-421a-b739-7926362deff2" (UID: "247c2c17-ca52-421a-b739-7926362deff2"). InnerVolumeSpecName "kube-api-access-8xmcv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:28 crc kubenswrapper[4910]: I1125 21:48:28.986128 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "247c2c17-ca52-421a-b739-7926362deff2" (UID: "247c2c17-ca52-421a-b739-7926362deff2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.031282 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.031315 4910 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/247c2c17-ca52-421a-b739-7926362deff2-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.031327 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xmcv\" (UniqueName: \"kubernetes.io/projected/247c2c17-ca52-421a-b739-7926362deff2-kube-api-access-8xmcv\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.031363 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.031373 4910 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.040570 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data" (OuterVolumeSpecName: "config-data") pod "247c2c17-ca52-421a-b739-7926362deff2" (UID: "247c2c17-ca52-421a-b739-7926362deff2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.133889 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/247c2c17-ca52-421a-b739-7926362deff2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.162088 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-f7657d4c8-n2wbh" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.435201 4910 generic.go:334] "Generic (PLEG): container finished" podID="247c2c17-ca52-421a-b739-7926362deff2" containerID="8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a" exitCode=0 Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.435271 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"247c2c17-ca52-421a-b739-7926362deff2","Type":"ContainerDied","Data":"8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a"} Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.436424 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"247c2c17-ca52-421a-b739-7926362deff2","Type":"ContainerDied","Data":"9860501c3cc14d346471cd9960869ef2692b0d352ac42645a35f06bf0794985b"} Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.435401 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.436473 4910 scope.go:117] "RemoveContainer" containerID="792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.442606 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-7c8449c4cd-d4lrg" event={"ID":"b1036586-470f-4688-a73b-b2849eae1c02","Type":"ContainerDied","Data":"6fced21950fbb1dc3de47c3ca3d39d05b0197b78f7b78acf049a41058450aff7"} Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.442735 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-7c8449c4cd-d4lrg" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.475187 4910 scope.go:117] "RemoveContainer" containerID="8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.485361 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-7c8449c4cd-d4lrg"] Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.502329 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-7c8449c4cd-d4lrg"] Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.513892 4910 scope.go:117] "RemoveContainer" containerID="792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.514358 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 21:48:29 crc kubenswrapper[4910]: E1125 21:48:29.515443 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3\": container with ID starting with 792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3 not found: ID does not exist" containerID="792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.515478 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3"} err="failed to get container status \"792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3\": rpc error: code = NotFound desc = could not find container \"792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3\": container with ID starting with 792d60c584649dd5f7142381aff44003a3dd2ae111fef3c888d73c485c8443b3 not found: ID does not exist" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.515506 4910 scope.go:117] "RemoveContainer" containerID="8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a" Nov 25 21:48:29 crc kubenswrapper[4910]: E1125 21:48:29.517190 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a\": container with ID starting with 8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a not found: ID does not exist" containerID="8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.517233 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a"} err="failed to get container status 
\"8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a\": rpc error: code = NotFound desc = could not find container \"8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a\": container with ID starting with 8e16999319433e65c81038f2be29d132d8805f8aba58a7e0772891b57b9e6e5a not found: ID does not exist" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.517274 4910 scope.go:117] "RemoveContainer" containerID="54a44786b30f9faa0d22af189569a75983882027794fadd099f9b3bcd5cfeeda" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.527382 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.536865 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 21:48:29 crc kubenswrapper[4910]: E1125 21:48:29.537636 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce75c296-74f5-4f94-bcdf-58bb1d44f445" containerName="neutron-api" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.537672 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce75c296-74f5-4f94-bcdf-58bb1d44f445" containerName="neutron-api" Nov 25 21:48:29 crc kubenswrapper[4910]: E1125 21:48:29.537694 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="247c2c17-ca52-421a-b739-7926362deff2" containerName="cinder-scheduler" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.537706 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="247c2c17-ca52-421a-b739-7926362deff2" containerName="cinder-scheduler" Nov 25 21:48:29 crc kubenswrapper[4910]: E1125 21:48:29.537725 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ce75c296-74f5-4f94-bcdf-58bb1d44f445" containerName="neutron-httpd" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.537735 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ce75c296-74f5-4f94-bcdf-58bb1d44f445" containerName="neutron-httpd" Nov 25 21:48:29 crc kubenswrapper[4910]: E1125 21:48:29.537759 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ad0d21f-7e9e-45a9-9c13-ff948f290e77" containerName="dnsmasq-dns" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.537768 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ad0d21f-7e9e-45a9-9c13-ff948f290e77" containerName="dnsmasq-dns" Nov 25 21:48:29 crc kubenswrapper[4910]: E1125 21:48:29.537787 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ad0d21f-7e9e-45a9-9c13-ff948f290e77" containerName="init" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.537795 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ad0d21f-7e9e-45a9-9c13-ff948f290e77" containerName="init" Nov 25 21:48:29 crc kubenswrapper[4910]: E1125 21:48:29.537808 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a49af42e-3a80-4f2a-9b4b-f43946a32c49" containerName="horizon-log" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.537818 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a49af42e-3a80-4f2a-9b4b-f43946a32c49" containerName="horizon-log" Nov 25 21:48:29 crc kubenswrapper[4910]: E1125 21:48:29.537836 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1036586-470f-4688-a73b-b2849eae1c02" containerName="barbican-api" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.537849 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1036586-470f-4688-a73b-b2849eae1c02" containerName="barbican-api" Nov 25 21:48:29 crc 
kubenswrapper[4910]: E1125 21:48:29.537863 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="247c2c17-ca52-421a-b739-7926362deff2" containerName="probe" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.537871 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="247c2c17-ca52-421a-b739-7926362deff2" containerName="probe" Nov 25 21:48:29 crc kubenswrapper[4910]: E1125 21:48:29.537900 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1036586-470f-4688-a73b-b2849eae1c02" containerName="barbican-api-log" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.537909 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1036586-470f-4688-a73b-b2849eae1c02" containerName="barbican-api-log" Nov 25 21:48:29 crc kubenswrapper[4910]: E1125 21:48:29.537929 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a49af42e-3a80-4f2a-9b4b-f43946a32c49" containerName="horizon" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.537938 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a49af42e-3a80-4f2a-9b4b-f43946a32c49" containerName="horizon" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.538396 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce75c296-74f5-4f94-bcdf-58bb1d44f445" containerName="neutron-httpd" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.538422 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a49af42e-3a80-4f2a-9b4b-f43946a32c49" containerName="horizon" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.538434 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ad0d21f-7e9e-45a9-9c13-ff948f290e77" containerName="dnsmasq-dns" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.538454 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="247c2c17-ca52-421a-b739-7926362deff2" containerName="cinder-scheduler" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.538471 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ce75c296-74f5-4f94-bcdf-58bb1d44f445" containerName="neutron-api" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.538479 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a49af42e-3a80-4f2a-9b4b-f43946a32c49" containerName="horizon-log" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.538490 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="247c2c17-ca52-421a-b739-7926362deff2" containerName="probe" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.538506 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1036586-470f-4688-a73b-b2849eae1c02" containerName="barbican-api-log" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.538516 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1036586-470f-4688-a73b-b2849eae1c02" containerName="barbican-api" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.540112 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.546367 4910 scope.go:117] "RemoveContainer" containerID="fe52d772bd4be9a08daa027563de6a2534c4b03941b5704d73529cf17d9c3225" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.553603 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.576425 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.645792 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.645933 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.646115 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-scripts\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.646318 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f9dafbee-be84-4df3-a1d1-6ff36015ec46-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.646475 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pwdvz\" (UniqueName: \"kubernetes.io/projected/f9dafbee-be84-4df3-a1d1-6ff36015ec46-kube-api-access-pwdvz\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.646669 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-config-data\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.748858 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pwdvz\" (UniqueName: \"kubernetes.io/projected/f9dafbee-be84-4df3-a1d1-6ff36015ec46-kube-api-access-pwdvz\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.749504 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-config-data\") pod \"cinder-scheduler-0\" 
(UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.750641 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.750678 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.750727 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-scripts\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.750805 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f9dafbee-be84-4df3-a1d1-6ff36015ec46-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.750931 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f9dafbee-be84-4df3-a1d1-6ff36015ec46-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.758165 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-scripts\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.758459 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.758736 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-config-data\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.758818 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f9dafbee-be84-4df3-a1d1-6ff36015ec46-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.769090 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pwdvz\" (UniqueName: 
\"kubernetes.io/projected/f9dafbee-be84-4df3-a1d1-6ff36015ec46-kube-api-access-pwdvz\") pod \"cinder-scheduler-0\" (UID: \"f9dafbee-be84-4df3-a1d1-6ff36015ec46\") " pod="openstack/cinder-scheduler-0" Nov 25 21:48:29 crc kubenswrapper[4910]: I1125 21:48:29.879644 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.417578 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 25 21:48:30 crc kubenswrapper[4910]: W1125 21:48:30.418336 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9dafbee_be84_4df3_a1d1_6ff36015ec46.slice/crio-6552b826b1d78f412d8d818200fd410081e487d621fc2959539ed7f0177fe705 WatchSource:0}: Error finding container 6552b826b1d78f412d8d818200fd410081e487d621fc2959539ed7f0177fe705: Status 404 returned error can't find the container with id 6552b826b1d78f412d8d818200fd410081e487d621fc2959539ed7f0177fe705 Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.459622 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f9dafbee-be84-4df3-a1d1-6ff36015ec46","Type":"ContainerStarted","Data":"6552b826b1d78f412d8d818200fd410081e487d621fc2959539ed7f0177fe705"} Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.837757 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.839119 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.841272 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-slb5m" Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.842844 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.843193 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.882971 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.927622 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.982221 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.982324 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7wsrp\" (UniqueName: \"kubernetes.io/projected/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-kube-api-access-7wsrp\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.982447 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: 
\"kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config-secret\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:30 crc kubenswrapper[4910]: I1125 21:48:30.982485 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.085510 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.086023 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wsrp\" (UniqueName: \"kubernetes.io/projected/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-kube-api-access-7wsrp\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.086143 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config-secret\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.086182 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.089513 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: E1125 21:48:31.090699 4910 projected.go:194] Error preparing data for projected volume kube-api-access-7wsrp for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: User "system:node:crc" cannot create resource "serviceaccounts/token" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 25 21:48:31 crc kubenswrapper[4910]: E1125 21:48:31.090751 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-kube-api-access-7wsrp podName:257a5160-aa73-4f3f-9a21-f46b3c62fd7d nodeName:}" failed. No retries permitted until 2025-11-25 21:48:31.590733441 +0000 UTC m=+1067.053209763 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-7wsrp" (UniqueName: "kubernetes.io/projected/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-kube-api-access-7wsrp") pod "openstackclient" (UID: "257a5160-aa73-4f3f-9a21-f46b3c62fd7d") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: User "system:node:crc" cannot create resource "serviceaccounts/token" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.096879 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config-secret\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.097499 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.100075 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 25 21:48:31 crc kubenswrapper[4910]: E1125 21:48:31.101018 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-7wsrp], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/openstackclient" podUID="257a5160-aa73-4f3f-9a21-f46b3c62fd7d" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.117912 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.195921 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.197616 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.244940 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="247c2c17-ca52-421a-b739-7926362deff2" path="/var/lib/kubelet/pods/247c2c17-ca52-421a-b739-7926362deff2/volumes" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.248830 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1036586-470f-4688-a73b-b2849eae1c02" path="/var/lib/kubelet/pods/b1036586-470f-4688-a73b-b2849eae1c02/volumes" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.250061 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.290881 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e36f2312-81e7-4b57-9131-695681724f08-openstack-config\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.290946 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56bmt\" (UniqueName: \"kubernetes.io/projected/e36f2312-81e7-4b57-9131-695681724f08-kube-api-access-56bmt\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.290988 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e36f2312-81e7-4b57-9131-695681724f08-openstack-config-secret\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.291143 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e36f2312-81e7-4b57-9131-695681724f08-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.392630 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56bmt\" (UniqueName: \"kubernetes.io/projected/e36f2312-81e7-4b57-9131-695681724f08-kube-api-access-56bmt\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.392817 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e36f2312-81e7-4b57-9131-695681724f08-openstack-config-secret\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.392992 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e36f2312-81e7-4b57-9131-695681724f08-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.393108 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e36f2312-81e7-4b57-9131-695681724f08-openstack-config\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.394002 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/e36f2312-81e7-4b57-9131-695681724f08-openstack-config\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.400183 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/e36f2312-81e7-4b57-9131-695681724f08-openstack-config-secret\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.400452 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e36f2312-81e7-4b57-9131-695681724f08-combined-ca-bundle\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.408948 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56bmt\" (UniqueName: \"kubernetes.io/projected/e36f2312-81e7-4b57-9131-695681724f08-kube-api-access-56bmt\") pod \"openstackclient\" (UID: \"e36f2312-81e7-4b57-9131-695681724f08\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.477970 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.478182 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f9dafbee-be84-4df3-a1d1-6ff36015ec46","Type":"ContainerStarted","Data":"1a2227ba0c52a8e4bc26a203b9847f7fecc12df7763f72caedcd33b01c835019"} Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.529231 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.533043 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.534369 4910 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="257a5160-aa73-4f3f-9a21-f46b3c62fd7d" podUID="e36f2312-81e7-4b57-9131-695681724f08" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.597346 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7wsrp\" (UniqueName: \"kubernetes.io/projected/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-kube-api-access-7wsrp\") pod \"openstackclient\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " pod="openstack/openstackclient" Nov 25 21:48:31 crc kubenswrapper[4910]: E1125 21:48:31.600580 4910 projected.go:194] Error preparing data for projected volume kube-api-access-7wsrp for pod openstack/openstackclient: failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (257a5160-aa73-4f3f-9a21-f46b3c62fd7d) does not match the UID in record. 
The object might have been deleted and then recreated Nov 25 21:48:31 crc kubenswrapper[4910]: E1125 21:48:31.600658 4910 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-kube-api-access-7wsrp podName:257a5160-aa73-4f3f-9a21-f46b3c62fd7d nodeName:}" failed. No retries permitted until 2025-11-25 21:48:32.60063472 +0000 UTC m=+1068.063111042 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-7wsrp" (UniqueName: "kubernetes.io/projected/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-kube-api-access-7wsrp") pod "openstackclient" (UID: "257a5160-aa73-4f3f-9a21-f46b3c62fd7d") : failed to fetch token: serviceaccounts "openstackclient-openstackclient" is forbidden: the UID in the bound object reference (257a5160-aa73-4f3f-9a21-f46b3c62fd7d) does not match the UID in record. The object might have been deleted and then recreated Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.701996 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config\") pod \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.702317 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-combined-ca-bundle\") pod \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.702393 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config-secret\") pod \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\" (UID: \"257a5160-aa73-4f3f-9a21-f46b3c62fd7d\") " Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.702390 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "257a5160-aa73-4f3f-9a21-f46b3c62fd7d" (UID: "257a5160-aa73-4f3f-9a21-f46b3c62fd7d"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.705545 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7wsrp\" (UniqueName: \"kubernetes.io/projected/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-kube-api-access-7wsrp\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.705580 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.707394 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "257a5160-aa73-4f3f-9a21-f46b3c62fd7d" (UID: "257a5160-aa73-4f3f-9a21-f46b3c62fd7d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.708032 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "257a5160-aa73-4f3f-9a21-f46b3c62fd7d" (UID: "257a5160-aa73-4f3f-9a21-f46b3c62fd7d"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.807818 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.807854 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/257a5160-aa73-4f3f-9a21-f46b3c62fd7d-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:31 crc kubenswrapper[4910]: I1125 21:48:31.997538 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 25 21:48:32 crc kubenswrapper[4910]: I1125 21:48:32.493503 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f9dafbee-be84-4df3-a1d1-6ff36015ec46","Type":"ContainerStarted","Data":"8721f2d049ec2a3461034672bd27370ffc6c288d2b7a26ff55b4be6791ecd29b"} Nov 25 21:48:32 crc kubenswrapper[4910]: I1125 21:48:32.496989 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e36f2312-81e7-4b57-9131-695681724f08","Type":"ContainerStarted","Data":"2b56e234b41d2ddc329d051127f3994d45289ae5f54ba5dc675f1411002cb403"} Nov 25 21:48:32 crc kubenswrapper[4910]: I1125 21:48:32.497012 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 25 21:48:32 crc kubenswrapper[4910]: I1125 21:48:32.525654 4910 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="257a5160-aa73-4f3f-9a21-f46b3c62fd7d" podUID="e36f2312-81e7-4b57-9131-695681724f08" Nov 25 21:48:33 crc kubenswrapper[4910]: I1125 21:48:33.236691 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="257a5160-aa73-4f3f-9a21-f46b3c62fd7d" path="/var/lib/kubelet/pods/257a5160-aa73-4f3f-9a21-f46b3c62fd7d/volumes" Nov 25 21:48:33 crc kubenswrapper[4910]: I1125 21:48:33.392490 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7c8449c4cd-d4lrg" podUID="b1036586-470f-4688-a73b-b2849eae1c02" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 21:48:33 crc kubenswrapper[4910]: I1125 21:48:33.392512 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-7c8449c4cd-d4lrg" podUID="b1036586-470f-4688-a73b-b2849eae1c02" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 21:48:34 crc kubenswrapper[4910]: I1125 21:48:34.880268 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 25 21:48:35 crc kubenswrapper[4910]: I1125 21:48:35.239674 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.23958376 podStartE2EDuration="6.23958376s" podCreationTimestamp="2025-11-25 21:48:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:32.521863712 +0000 UTC m=+1067.984340034" watchObservedRunningTime="2025-11-25 21:48:35.23958376 +0000 UTC m=+1070.702060082" Nov 25 21:48:35 crc kubenswrapper[4910]: I1125 21:48:35.351955 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-58f8d7cc56-csk7l" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.147:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.147:8443: connect: connection refused" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.020826 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-6575886cb7-hv9qm"] Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.022945 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.025893 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.026105 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.027110 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.035069 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6575886cb7-hv9qm"] Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.127903 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-internal-tls-certs\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.127987 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjjkh\" (UniqueName: \"kubernetes.io/projected/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-kube-api-access-mjjkh\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.128029 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-run-httpd\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.128051 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-config-data\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.128075 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-public-tls-certs\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.128206 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-combined-ca-bundle\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.128325 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-etc-swift\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " 
pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.128360 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-log-httpd\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.234069 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-etc-swift\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.234147 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-log-httpd\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.234192 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-internal-tls-certs\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.234304 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjjkh\" (UniqueName: \"kubernetes.io/projected/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-kube-api-access-mjjkh\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.234370 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-run-httpd\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.234402 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-config-data\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.234438 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-public-tls-certs\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.234509 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-combined-ca-bundle\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 
21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.235835 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-log-httpd\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.237289 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-run-httpd\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.243697 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-internal-tls-certs\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.244802 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-combined-ca-bundle\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.245795 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-etc-swift\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.246497 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-config-data\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.246960 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-public-tls-certs\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.254449 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjjkh\" (UniqueName: \"kubernetes.io/projected/8028bd01-f5f2-4c20-9f51-c6a7e06571fd-kube-api-access-mjjkh\") pod \"swift-proxy-6575886cb7-hv9qm\" (UID: \"8028bd01-f5f2-4c20-9f51-c6a7e06571fd\") " pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:37 crc kubenswrapper[4910]: I1125 21:48:37.342643 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:38 crc kubenswrapper[4910]: I1125 21:48:38.100500 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:48:38 crc kubenswrapper[4910]: I1125 21:48:38.101296 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="ceilometer-central-agent" containerID="cri-o://122cff3a8c7d363474cb5a00f4df0302716e6a8a476575acc5474e6e1e7d5588" gracePeriod=30 Nov 25 21:48:38 crc kubenswrapper[4910]: I1125 21:48:38.101466 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="ceilometer-notification-agent" containerID="cri-o://633551ab5b12cf043c15a2de1981631a863ed3792a2a8b8cd89951c8a8c8fc9e" gracePeriod=30 Nov 25 21:48:38 crc kubenswrapper[4910]: I1125 21:48:38.101452 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="sg-core" containerID="cri-o://c03ac0d89a8ae2ab92700224308d02b40b3a1add69c76e5af89f688958bd7d92" gracePeriod=30 Nov 25 21:48:38 crc kubenswrapper[4910]: I1125 21:48:38.101664 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="proxy-httpd" containerID="cri-o://b83067cba9f0e3007f91dc6c33e8b3c46de73ab23b9538b0e594d8dcef535135" gracePeriod=30 Nov 25 21:48:38 crc kubenswrapper[4910]: I1125 21:48:38.114852 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.165:3000/\": EOF" Nov 25 21:48:38 crc kubenswrapper[4910]: I1125 21:48:38.592211 4910 generic.go:334] "Generic (PLEG): container finished" podID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerID="b83067cba9f0e3007f91dc6c33e8b3c46de73ab23b9538b0e594d8dcef535135" exitCode=0 Nov 25 21:48:38 crc kubenswrapper[4910]: I1125 21:48:38.592263 4910 generic.go:334] "Generic (PLEG): container finished" podID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerID="c03ac0d89a8ae2ab92700224308d02b40b3a1add69c76e5af89f688958bd7d92" exitCode=2 Nov 25 21:48:38 crc kubenswrapper[4910]: I1125 21:48:38.592285 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5810cdb3-08d2-433b-b43e-ae16a13f108c","Type":"ContainerDied","Data":"b83067cba9f0e3007f91dc6c33e8b3c46de73ab23b9538b0e594d8dcef535135"} Nov 25 21:48:38 crc kubenswrapper[4910]: I1125 21:48:38.592368 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5810cdb3-08d2-433b-b43e-ae16a13f108c","Type":"ContainerDied","Data":"c03ac0d89a8ae2ab92700224308d02b40b3a1add69c76e5af89f688958bd7d92"} Nov 25 21:48:39 crc kubenswrapper[4910]: I1125 21:48:39.258803 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:39 crc kubenswrapper[4910]: I1125 21:48:39.259032 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-64ff96875d-p4n97" Nov 25 21:48:39 crc kubenswrapper[4910]: I1125 21:48:39.555274 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:48:39 
crc kubenswrapper[4910]: I1125 21:48:39.556050 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f2e4643d-59dd-4252-8b35-c6f3003aa3d0" containerName="glance-log" containerID="cri-o://2ee3018aa2cdd5ada4ecc518f96b93a45dcf57c4e8f7487fb67bd09c483a0232" gracePeriod=30 Nov 25 21:48:39 crc kubenswrapper[4910]: I1125 21:48:39.556172 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="f2e4643d-59dd-4252-8b35-c6f3003aa3d0" containerName="glance-httpd" containerID="cri-o://0c983214d8e6bfa114d31ef3aae2f0cf76f47113e3011d6a00e49e39becd1303" gracePeriod=30 Nov 25 21:48:39 crc kubenswrapper[4910]: I1125 21:48:39.610951 4910 generic.go:334] "Generic (PLEG): container finished" podID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerID="122cff3a8c7d363474cb5a00f4df0302716e6a8a476575acc5474e6e1e7d5588" exitCode=0 Nov 25 21:48:39 crc kubenswrapper[4910]: I1125 21:48:39.611039 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5810cdb3-08d2-433b-b43e-ae16a13f108c","Type":"ContainerDied","Data":"122cff3a8c7d363474cb5a00f4df0302716e6a8a476575acc5474e6e1e7d5588"} Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.039842 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-78559"] Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.042760 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-78559" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.066733 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-78559"] Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.142485 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-sr4sn"] Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.144626 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-sr4sn" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.164201 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-sr4sn"] Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.199835 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46mmf\" (UniqueName: \"kubernetes.io/projected/3b417522-64b1-43ad-84e9-19795c605ebf-kube-api-access-46mmf\") pod \"nova-api-db-create-78559\" (UID: \"3b417522-64b1-43ad-84e9-19795c605ebf\") " pod="openstack/nova-api-db-create-78559" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.199916 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b417522-64b1-43ad-84e9-19795c605ebf-operator-scripts\") pod \"nova-api-db-create-78559\" (UID: \"3b417522-64b1-43ad-84e9-19795c605ebf\") " pod="openstack/nova-api-db-create-78559" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.265258 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-3555-account-create-update-4wx9j"] Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.266727 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-3555-account-create-update-4wx9j" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.269366 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.295043 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-3555-account-create-update-4wx9j"] Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.301904 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5tcc\" (UniqueName: \"kubernetes.io/projected/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-kube-api-access-j5tcc\") pod \"nova-cell0-db-create-sr4sn\" (UID: \"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70\") " pod="openstack/nova-cell0-db-create-sr4sn" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.302173 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-operator-scripts\") pod \"nova-cell0-db-create-sr4sn\" (UID: \"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70\") " pod="openstack/nova-cell0-db-create-sr4sn" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.302309 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46mmf\" (UniqueName: \"kubernetes.io/projected/3b417522-64b1-43ad-84e9-19795c605ebf-kube-api-access-46mmf\") pod \"nova-api-db-create-78559\" (UID: \"3b417522-64b1-43ad-84e9-19795c605ebf\") " pod="openstack/nova-api-db-create-78559" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.302425 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b417522-64b1-43ad-84e9-19795c605ebf-operator-scripts\") pod \"nova-api-db-create-78559\" (UID: \"3b417522-64b1-43ad-84e9-19795c605ebf\") " pod="openstack/nova-api-db-create-78559" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.304375 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b417522-64b1-43ad-84e9-19795c605ebf-operator-scripts\") pod \"nova-api-db-create-78559\" (UID: \"3b417522-64b1-43ad-84e9-19795c605ebf\") " pod="openstack/nova-api-db-create-78559" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.358949 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46mmf\" (UniqueName: \"kubernetes.io/projected/3b417522-64b1-43ad-84e9-19795c605ebf-kube-api-access-46mmf\") pod \"nova-api-db-create-78559\" (UID: \"3b417522-64b1-43ad-84e9-19795c605ebf\") " pod="openstack/nova-api-db-create-78559" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.359911 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-78559" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.370322 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-fns6z"] Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.371782 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-fns6z" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.390138 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-fns6z"] Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.404575 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4t6m\" (UniqueName: \"kubernetes.io/projected/d8187b53-8402-4ae3-b580-5afa43f29e9f-kube-api-access-x4t6m\") pod \"nova-api-3555-account-create-update-4wx9j\" (UID: \"d8187b53-8402-4ae3-b580-5afa43f29e9f\") " pod="openstack/nova-api-3555-account-create-update-4wx9j" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.404647 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5tcc\" (UniqueName: \"kubernetes.io/projected/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-kube-api-access-j5tcc\") pod \"nova-cell0-db-create-sr4sn\" (UID: \"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70\") " pod="openstack/nova-cell0-db-create-sr4sn" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.404687 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-operator-scripts\") pod \"nova-cell0-db-create-sr4sn\" (UID: \"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70\") " pod="openstack/nova-cell0-db-create-sr4sn" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.404719 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8187b53-8402-4ae3-b580-5afa43f29e9f-operator-scripts\") pod \"nova-api-3555-account-create-update-4wx9j\" (UID: \"d8187b53-8402-4ae3-b580-5afa43f29e9f\") " pod="openstack/nova-api-3555-account-create-update-4wx9j" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.406206 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-operator-scripts\") pod \"nova-cell0-db-create-sr4sn\" (UID: \"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70\") " pod="openstack/nova-cell0-db-create-sr4sn" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.448997 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5tcc\" (UniqueName: \"kubernetes.io/projected/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-kube-api-access-j5tcc\") pod \"nova-cell0-db-create-sr4sn\" (UID: \"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70\") " pod="openstack/nova-cell0-db-create-sr4sn" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.456009 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-d4c6-account-create-update-4bczb"] Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.457454 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-d4c6-account-create-update-4bczb" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.460991 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.472945 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-sr4sn" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.477448 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-d4c6-account-create-update-4bczb"] Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.506734 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-operator-scripts\") pod \"nova-cell1-db-create-fns6z\" (UID: \"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80\") " pod="openstack/nova-cell1-db-create-fns6z" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.506855 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4t6m\" (UniqueName: \"kubernetes.io/projected/d8187b53-8402-4ae3-b580-5afa43f29e9f-kube-api-access-x4t6m\") pod \"nova-api-3555-account-create-update-4wx9j\" (UID: \"d8187b53-8402-4ae3-b580-5afa43f29e9f\") " pod="openstack/nova-api-3555-account-create-update-4wx9j" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.506916 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8187b53-8402-4ae3-b580-5afa43f29e9f-operator-scripts\") pod \"nova-api-3555-account-create-update-4wx9j\" (UID: \"d8187b53-8402-4ae3-b580-5afa43f29e9f\") " pod="openstack/nova-api-3555-account-create-update-4wx9j" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.506943 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dnskh\" (UniqueName: \"kubernetes.io/projected/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-kube-api-access-dnskh\") pod \"nova-cell1-db-create-fns6z\" (UID: \"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80\") " pod="openstack/nova-cell1-db-create-fns6z" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.507936 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8187b53-8402-4ae3-b580-5afa43f29e9f-operator-scripts\") pod \"nova-api-3555-account-create-update-4wx9j\" (UID: \"d8187b53-8402-4ae3-b580-5afa43f29e9f\") " pod="openstack/nova-api-3555-account-create-update-4wx9j" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.531703 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4t6m\" (UniqueName: \"kubernetes.io/projected/d8187b53-8402-4ae3-b580-5afa43f29e9f-kube-api-access-x4t6m\") pod \"nova-api-3555-account-create-update-4wx9j\" (UID: \"d8187b53-8402-4ae3-b580-5afa43f29e9f\") " pod="openstack/nova-api-3555-account-create-update-4wx9j" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.606832 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-3555-account-create-update-4wx9j" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.609197 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dnskh\" (UniqueName: \"kubernetes.io/projected/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-kube-api-access-dnskh\") pod \"nova-cell1-db-create-fns6z\" (UID: \"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80\") " pod="openstack/nova-cell1-db-create-fns6z" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.609265 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdgrn\" (UniqueName: \"kubernetes.io/projected/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-kube-api-access-hdgrn\") pod \"nova-cell0-d4c6-account-create-update-4bczb\" (UID: \"fb48d3cb-07fa-4be7-bbb2-8af493a83edf\") " pod="openstack/nova-cell0-d4c6-account-create-update-4bczb" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.609298 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-operator-scripts\") pod \"nova-cell1-db-create-fns6z\" (UID: \"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80\") " pod="openstack/nova-cell1-db-create-fns6z" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.609352 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-operator-scripts\") pod \"nova-cell0-d4c6-account-create-update-4bczb\" (UID: \"fb48d3cb-07fa-4be7-bbb2-8af493a83edf\") " pod="openstack/nova-cell0-d4c6-account-create-update-4bczb" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.613231 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-operator-scripts\") pod \"nova-cell1-db-create-fns6z\" (UID: \"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80\") " pod="openstack/nova-cell1-db-create-fns6z" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.643035 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dnskh\" (UniqueName: \"kubernetes.io/projected/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-kube-api-access-dnskh\") pod \"nova-cell1-db-create-fns6z\" (UID: \"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80\") " pod="openstack/nova-cell1-db-create-fns6z" Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.646929 4910 generic.go:334] "Generic (PLEG): container finished" podID="f2e4643d-59dd-4252-8b35-c6f3003aa3d0" containerID="2ee3018aa2cdd5ada4ecc518f96b93a45dcf57c4e8f7487fb67bd09c483a0232" exitCode=143 Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.646983 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2e4643d-59dd-4252-8b35-c6f3003aa3d0","Type":"ContainerDied","Data":"2ee3018aa2cdd5ada4ecc518f96b93a45dcf57c4e8f7487fb67bd09c483a0232"} Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.714810 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-operator-scripts\") pod \"nova-cell0-d4c6-account-create-update-4bczb\" (UID: \"fb48d3cb-07fa-4be7-bbb2-8af493a83edf\") " pod="openstack/nova-cell0-d4c6-account-create-update-4bczb" Nov 25 21:48:40 crc 
kubenswrapper[4910]: I1125 21:48:40.714991 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdgrn\" (UniqueName: \"kubernetes.io/projected/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-kube-api-access-hdgrn\") pod \"nova-cell0-d4c6-account-create-update-4bczb\" (UID: \"fb48d3cb-07fa-4be7-bbb2-8af493a83edf\") " pod="openstack/nova-cell0-d4c6-account-create-update-4bczb"
Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.715641 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-operator-scripts\") pod \"nova-cell0-d4c6-account-create-update-4bczb\" (UID: \"fb48d3cb-07fa-4be7-bbb2-8af493a83edf\") " pod="openstack/nova-cell0-d4c6-account-create-update-4bczb"
Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.728027 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fns6z"
Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.792109 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hdgrn\" (UniqueName: \"kubernetes.io/projected/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-kube-api-access-hdgrn\") pod \"nova-cell0-d4c6-account-create-update-4bczb\" (UID: \"fb48d3cb-07fa-4be7-bbb2-8af493a83edf\") " pod="openstack/nova-cell0-d4c6-account-create-update-4bczb"
Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.792305 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-4dff-account-create-update-b7gr5"]
Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.801169 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-d4c6-account-create-update-4bczb"
Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.805045 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4dff-account-create-update-b7gr5"
Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.817784 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.822469 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-4dff-account-create-update-b7gr5"]
Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.923591 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r556x\" (UniqueName: \"kubernetes.io/projected/41176817-8b80-4a07-832e-3957be57cf82-kube-api-access-r556x\") pod \"nova-cell1-4dff-account-create-update-b7gr5\" (UID: \"41176817-8b80-4a07-832e-3957be57cf82\") " pod="openstack/nova-cell1-4dff-account-create-update-b7gr5"
Nov 25 21:48:40 crc kubenswrapper[4910]: I1125 21:48:40.923660 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41176817-8b80-4a07-832e-3957be57cf82-operator-scripts\") pod \"nova-cell1-4dff-account-create-update-b7gr5\" (UID: \"41176817-8b80-4a07-832e-3957be57cf82\") " pod="openstack/nova-cell1-4dff-account-create-update-b7gr5"
Nov 25 21:48:41 crc kubenswrapper[4910]: I1125 21:48:41.025608 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r556x\" (UniqueName: \"kubernetes.io/projected/41176817-8b80-4a07-832e-3957be57cf82-kube-api-access-r556x\") pod \"nova-cell1-4dff-account-create-update-b7gr5\" (UID: \"41176817-8b80-4a07-832e-3957be57cf82\") " pod="openstack/nova-cell1-4dff-account-create-update-b7gr5"
Nov 25 21:48:41 crc kubenswrapper[4910]: I1125 21:48:41.025662 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41176817-8b80-4a07-832e-3957be57cf82-operator-scripts\") pod \"nova-cell1-4dff-account-create-update-b7gr5\" (UID: \"41176817-8b80-4a07-832e-3957be57cf82\") " pod="openstack/nova-cell1-4dff-account-create-update-b7gr5"
Nov 25 21:48:41 crc kubenswrapper[4910]: I1125 21:48:41.026529 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41176817-8b80-4a07-832e-3957be57cf82-operator-scripts\") pod \"nova-cell1-4dff-account-create-update-b7gr5\" (UID: \"41176817-8b80-4a07-832e-3957be57cf82\") " pod="openstack/nova-cell1-4dff-account-create-update-b7gr5"
Nov 25 21:48:41 crc kubenswrapper[4910]: I1125 21:48:41.031153 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Nov 25 21:48:41 crc kubenswrapper[4910]: I1125 21:48:41.057967 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r556x\" (UniqueName: \"kubernetes.io/projected/41176817-8b80-4a07-832e-3957be57cf82-kube-api-access-r556x\") pod \"nova-cell1-4dff-account-create-update-b7gr5\" (UID: \"41176817-8b80-4a07-832e-3957be57cf82\") " pod="openstack/nova-cell1-4dff-account-create-update-b7gr5"
Nov 25 21:48:41 crc kubenswrapper[4910]: I1125 21:48:41.144515 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4dff-account-create-update-b7gr5"
Nov 25 21:48:43 crc kubenswrapper[4910]: I1125 21:48:43.682835 4910 generic.go:334] "Generic (PLEG): container finished" podID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerID="633551ab5b12cf043c15a2de1981631a863ed3792a2a8b8cd89951c8a8c8fc9e" exitCode=0
Nov 25 21:48:43 crc kubenswrapper[4910]: I1125 21:48:43.682949 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5810cdb3-08d2-433b-b43e-ae16a13f108c","Type":"ContainerDied","Data":"633551ab5b12cf043c15a2de1981631a863ed3792a2a8b8cd89951c8a8c8fc9e"}
Nov 25 21:48:43 crc kubenswrapper[4910]: I1125 21:48:43.687166 4910 generic.go:334] "Generic (PLEG): container finished" podID="f2e4643d-59dd-4252-8b35-c6f3003aa3d0" containerID="0c983214d8e6bfa114d31ef3aae2f0cf76f47113e3011d6a00e49e39becd1303" exitCode=0
Nov 25 21:48:43 crc kubenswrapper[4910]: I1125 21:48:43.687220 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2e4643d-59dd-4252-8b35-c6f3003aa3d0","Type":"ContainerDied","Data":"0c983214d8e6bfa114d31ef3aae2f0cf76f47113e3011d6a00e49e39becd1303"}
Nov 25 21:48:44 crc kubenswrapper[4910]: I1125 21:48:44.053036 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 21:48:44 crc kubenswrapper[4910]: I1125 21:48:44.053414 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d002bfa2-970b-44fc-b839-8e114323162e" containerName="glance-httpd" containerID="cri-o://57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f" gracePeriod=30
Nov 25 21:48:44 crc kubenswrapper[4910]: I1125 21:48:44.053577 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d002bfa2-970b-44fc-b839-8e114323162e" containerName="glance-log" containerID="cri-o://46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e" gracePeriod=30
Nov 25 21:48:44 crc kubenswrapper[4910]: I1125 21:48:44.699114 4910 generic.go:334] "Generic (PLEG): container finished" podID="d002bfa2-970b-44fc-b839-8e114323162e" containerID="46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e" exitCode=143
Nov 25 21:48:44 crc kubenswrapper[4910]: I1125 21:48:44.699212 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d002bfa2-970b-44fc-b839-8e114323162e","Type":"ContainerDied","Data":"46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e"}
Nov 25 21:48:45 crc kubenswrapper[4910]: I1125 21:48:45.354938 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-58f8d7cc56-csk7l" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.147:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.147:8443: connect: connection refused"
Nov 25 21:48:45 crc kubenswrapper[4910]: I1125 21:48:45.355335 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-58f8d7cc56-csk7l"
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.071861 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.144368 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-combined-ca-bundle\") pod \"5810cdb3-08d2-433b-b43e-ae16a13f108c\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.144446 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-scripts\") pod \"5810cdb3-08d2-433b-b43e-ae16a13f108c\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.144488 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-sg-core-conf-yaml\") pod \"5810cdb3-08d2-433b-b43e-ae16a13f108c\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.144515 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nlx6\" (UniqueName: \"kubernetes.io/projected/5810cdb3-08d2-433b-b43e-ae16a13f108c-kube-api-access-5nlx6\") pod \"5810cdb3-08d2-433b-b43e-ae16a13f108c\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.144711 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-run-httpd\") pod \"5810cdb3-08d2-433b-b43e-ae16a13f108c\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.144736 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-log-httpd\") pod \"5810cdb3-08d2-433b-b43e-ae16a13f108c\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.144791 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-config-data\") pod \"5810cdb3-08d2-433b-b43e-ae16a13f108c\" (UID: \"5810cdb3-08d2-433b-b43e-ae16a13f108c\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.145341 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "5810cdb3-08d2-433b-b43e-ae16a13f108c" (UID: "5810cdb3-08d2-433b-b43e-ae16a13f108c"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.146426 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "5810cdb3-08d2-433b-b43e-ae16a13f108c" (UID: "5810cdb3-08d2-433b-b43e-ae16a13f108c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.152983 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5810cdb3-08d2-433b-b43e-ae16a13f108c-kube-api-access-5nlx6" (OuterVolumeSpecName: "kube-api-access-5nlx6") pod "5810cdb3-08d2-433b-b43e-ae16a13f108c" (UID: "5810cdb3-08d2-433b-b43e-ae16a13f108c"). InnerVolumeSpecName "kube-api-access-5nlx6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.153055 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-scripts" (OuterVolumeSpecName: "scripts") pod "5810cdb3-08d2-433b-b43e-ae16a13f108c" (UID: "5810cdb3-08d2-433b-b43e-ae16a13f108c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.178354 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "5810cdb3-08d2-433b-b43e-ae16a13f108c" (UID: "5810cdb3-08d2-433b-b43e-ae16a13f108c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.247491 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.247722 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.247734 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nlx6\" (UniqueName: \"kubernetes.io/projected/5810cdb3-08d2-433b-b43e-ae16a13f108c-kube-api-access-5nlx6\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.247745 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.247754 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/5810cdb3-08d2-433b-b43e-ae16a13f108c-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.256865 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-config-data" (OuterVolumeSpecName: "config-data") pod "5810cdb3-08d2-433b-b43e-ae16a13f108c" (UID: "5810cdb3-08d2-433b-b43e-ae16a13f108c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.261412 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5810cdb3-08d2-433b-b43e-ae16a13f108c" (UID: "5810cdb3-08d2-433b-b43e-ae16a13f108c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.330469 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.350204 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.350273 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5810cdb3-08d2-433b-b43e-ae16a13f108c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.451168 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-httpd-run\") pod \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.451230 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-combined-ca-bundle\") pod \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.451271 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-logs\") pod \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.451506 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5vtf8\" (UniqueName: \"kubernetes.io/projected/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-kube-api-access-5vtf8\") pod \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.451529 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-config-data\") pod \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.451658 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-internal-tls-certs\") pod \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.451718 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.451747 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-scripts\") pod \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\" (UID: \"f2e4643d-59dd-4252-8b35-c6f3003aa3d0\") "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.461861 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f2e4643d-59dd-4252-8b35-c6f3003aa3d0" (UID: "f2e4643d-59dd-4252-8b35-c6f3003aa3d0"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.466278 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-logs" (OuterVolumeSpecName: "logs") pod "f2e4643d-59dd-4252-8b35-c6f3003aa3d0" (UID: "f2e4643d-59dd-4252-8b35-c6f3003aa3d0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.472069 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-kube-api-access-5vtf8" (OuterVolumeSpecName: "kube-api-access-5vtf8") pod "f2e4643d-59dd-4252-8b35-c6f3003aa3d0" (UID: "f2e4643d-59dd-4252-8b35-c6f3003aa3d0"). InnerVolumeSpecName "kube-api-access-5vtf8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.485579 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-scripts" (OuterVolumeSpecName: "scripts") pod "f2e4643d-59dd-4252-8b35-c6f3003aa3d0" (UID: "f2e4643d-59dd-4252-8b35-c6f3003aa3d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.492412 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "f2e4643d-59dd-4252-8b35-c6f3003aa3d0" (UID: "f2e4643d-59dd-4252-8b35-c6f3003aa3d0"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.548441 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-78559"]
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.557779 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5vtf8\" (UniqueName: \"kubernetes.io/projected/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-kube-api-access-5vtf8\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.557830 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" "
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.557841 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.557851 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-httpd-run\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.557861 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-logs\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.596947 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-fns6z"]
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.596564 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f2e4643d-59dd-4252-8b35-c6f3003aa3d0" (UID: "f2e4643d-59dd-4252-8b35-c6f3003aa3d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.613095 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc"
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.632006 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-config-data" (OuterVolumeSpecName: "config-data") pod "f2e4643d-59dd-4252-8b35-c6f3003aa3d0" (UID: "f2e4643d-59dd-4252-8b35-c6f3003aa3d0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.653574 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "f2e4643d-59dd-4252-8b35-c6f3003aa3d0" (UID: "f2e4643d-59dd-4252-8b35-c6f3003aa3d0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.658492 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.665103 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-sr4sn"]
Nov 25 21:48:46 crc kubenswrapper[4910]: W1125 21:48:46.668077 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd8187b53_8402_4ae3_b580_5afa43f29e9f.slice/crio-7bbb00aba4524c1f2fc8e889ca54b4fb8da936d56e74253a8b4ac7cdfc0bf964 WatchSource:0}: Error finding container 7bbb00aba4524c1f2fc8e889ca54b4fb8da936d56e74253a8b4ac7cdfc0bf964: Status 404 returned error can't find the container with id 7bbb00aba4524c1f2fc8e889ca54b4fb8da936d56e74253a8b4ac7cdfc0bf964
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.672069 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.672287 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.672436 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.672586 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f2e4643d-59dd-4252-8b35-c6f3003aa3d0-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.692421 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.700550 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.761448 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-4dff-account-create-update-b7gr5"]
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.790564 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-3555-account-create-update-4wx9j"]
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.793718 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-d4c6-account-create-update-4bczb"]
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.804561 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-6575886cb7-hv9qm"]
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.825901 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"f2e4643d-59dd-4252-8b35-c6f3003aa3d0","Type":"ContainerDied","Data":"2658ec53139f23020396273c01bebe6484b769dc371e4d242598e53112f1d28b"}
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.825974 4910 scope.go:117] "RemoveContainer" containerID="0c983214d8e6bfa114d31ef3aae2f0cf76f47113e3011d6a00e49e39becd1303"
Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.826220 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.856172 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3555-account-create-update-4wx9j" event={"ID":"d8187b53-8402-4ae3-b580-5afa43f29e9f","Type":"ContainerStarted","Data":"7bbb00aba4524c1f2fc8e889ca54b4fb8da936d56e74253a8b4ac7cdfc0bf964"} Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.910554 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fns6z" event={"ID":"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80","Type":"ContainerStarted","Data":"5b89439fbafe5053560a8baa371d9a33a03d46e4e038a12d89a9f18a45eb2177"} Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.944583 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"e36f2312-81e7-4b57-9131-695681724f08","Type":"ContainerStarted","Data":"9656182e37497af47db57e03c4b5c6f1fc2c360fcecff6a6747a9ec5160d86a0"} Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.970502 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.978983 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"5810cdb3-08d2-433b-b43e-ae16a13f108c","Type":"ContainerDied","Data":"089c8b7821e29a52a7d285918936f7c5f071eb7f1bdda530378bd849c3ca9dfe"} Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.979361 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.984840 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.996353 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:48:46 crc kubenswrapper[4910]: E1125 21:48:46.996793 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="sg-core" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.996809 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="sg-core" Nov 25 21:48:46 crc kubenswrapper[4910]: E1125 21:48:46.996827 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="ceilometer-notification-agent" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.996834 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="ceilometer-notification-agent" Nov 25 21:48:46 crc kubenswrapper[4910]: E1125 21:48:46.996844 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2e4643d-59dd-4252-8b35-c6f3003aa3d0" containerName="glance-httpd" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.996850 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2e4643d-59dd-4252-8b35-c6f3003aa3d0" containerName="glance-httpd" Nov 25 21:48:46 crc kubenswrapper[4910]: E1125 21:48:46.996867 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="ceilometer-central-agent" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.996872 4910 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="ceilometer-central-agent" Nov 25 21:48:46 crc kubenswrapper[4910]: E1125 21:48:46.996887 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2e4643d-59dd-4252-8b35-c6f3003aa3d0" containerName="glance-log" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.996892 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2e4643d-59dd-4252-8b35-c6f3003aa3d0" containerName="glance-log" Nov 25 21:48:46 crc kubenswrapper[4910]: E1125 21:48:46.996912 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="proxy-httpd" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.996918 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="proxy-httpd" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.997161 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2e4643d-59dd-4252-8b35-c6f3003aa3d0" containerName="glance-log" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.997178 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="ceilometer-notification-agent" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.997193 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="sg-core" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.997209 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="ceilometer-central-agent" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.997218 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" containerName="proxy-httpd" Nov 25 21:48:46 crc kubenswrapper[4910]: I1125 21:48:46.997226 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2e4643d-59dd-4252-8b35-c6f3003aa3d0" containerName="glance-httpd" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:46.998258 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.001524 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-d4c6-account-create-update-4bczb" event={"ID":"fb48d3cb-07fa-4be7-bbb2-8af493a83edf","Type":"ContainerStarted","Data":"2a0d23f4475a6e34ce00a9dcd60f9b2b0a14eaee5914bc27a7088b1c52b9288e"} Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.005421 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.009996 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.012837 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.627185878 podStartE2EDuration="16.012817021s" podCreationTimestamp="2025-11-25 21:48:31 +0000 UTC" firstStartedPulling="2025-11-25 21:48:32.025492862 +0000 UTC m=+1067.487969184" lastFinishedPulling="2025-11-25 21:48:45.411124005 +0000 UTC m=+1080.873600327" observedRunningTime="2025-11-25 21:48:47.000801532 +0000 UTC m=+1082.463277864" watchObservedRunningTime="2025-11-25 21:48:47.012817021 +0000 UTC m=+1082.475293343" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.015307 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-sr4sn" event={"ID":"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70","Type":"ContainerStarted","Data":"87ac66e80a44617c3870b7f6794614c893f7ed13757fa31dccbcb61fb67a3082"} Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.033793 4910 scope.go:117] "RemoveContainer" containerID="2ee3018aa2cdd5ada4ecc518f96b93a45dcf57c4e8f7487fb67bd09c483a0232" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.034279 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4dff-account-create-update-b7gr5" event={"ID":"41176817-8b80-4a07-832e-3957be57cf82","Type":"ContainerStarted","Data":"d4398e39eda834a61a761dd8fa2b877045d09620cba653bb4f30b881993b3b4d"} Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.084602 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-78559" event={"ID":"3b417522-64b1-43ad-84e9-19795c605ebf","Type":"ContainerStarted","Data":"fde7b64e424d59dc686cbab6dab8e92a486510a04e085997ef4207011a05d4d0"} Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.086415 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.086472 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.086502 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg4hx\" (UniqueName: 
\"kubernetes.io/projected/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-kube-api-access-qg4hx\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.086534 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.086605 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-logs\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.086645 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.086664 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.086688 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0" Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.093023 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.133788 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.167171 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.171625 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.178672 4910 util.go:30] "No sandbox for pod can be found. 
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190377 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190428 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190448 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190473 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-scripts\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190488 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-config-data\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190510 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-run-httpd\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190538 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-log-httpd\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190573 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190593 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190618 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190642 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg4hx\" (UniqueName: \"kubernetes.io/projected/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-kube-api-access-qg4hx\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190673 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190701 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190726 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2w28q\" (UniqueName: \"kubernetes.io/projected/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-kube-api-access-2w28q\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.190795 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-logs\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.191398 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-logs\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.191603 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.200884 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.203073 4910 scope.go:117] "RemoveContainer" containerID="b83067cba9f0e3007f91dc6c33e8b3c46de73ab23b9538b0e594d8dcef535135"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.203578 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.203761 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.204055 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.212705 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.214658 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-scripts\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.215692 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-config-data\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.222593 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.260170 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qg4hx\" (UniqueName: \"kubernetes.io/projected/b9542bd3-e5b7-44e2-84bb-11b34d1fc44b-kube-api-access-qg4hx\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.295178 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.295239 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2w28q\" (UniqueName: \"kubernetes.io/projected/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-kube-api-access-2w28q\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.295384 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-scripts\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.295412 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-config-data\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.295442 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-run-httpd\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.295486 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-log-httpd\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.295540 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.300628 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.305712 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-run-httpd\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.305843 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-log-httpd\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.308736 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-scripts\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.310124 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5810cdb3-08d2-433b-b43e-ae16a13f108c" path="/var/lib/kubelet/pods/5810cdb3-08d2-433b-b43e-ae16a13f108c/volumes"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.311059 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2e4643d-59dd-4252-8b35-c6f3003aa3d0" path="/var/lib/kubelet/pods/f2e4643d-59dd-4252-8b35-c6f3003aa3d0/volumes"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.314768 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-config-data\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.337750 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2w28q\" (UniqueName: \"kubernetes.io/projected/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-kube-api-access-2w28q\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.358319 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.363627 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b\") " pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.491664 4910 scope.go:117] "RemoveContainer" containerID="c03ac0d89a8ae2ab92700224308d02b40b3a1add69c76e5af89f688958bd7d92"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.636174 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.637896 4910 scope.go:117] "RemoveContainer" containerID="633551ab5b12cf043c15a2de1981631a863ed3792a2a8b8cd89951c8a8c8fc9e"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.671346 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 21:48:47 crc kubenswrapper[4910]: I1125 21:48:47.742424 4910 scope.go:117] "RemoveContainer" containerID="122cff3a8c7d363474cb5a00f4df0302716e6a8a476575acc5474e6e1e7d5588"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.087000 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.183638 4910 generic.go:334] "Generic (PLEG): container finished" podID="d002bfa2-970b-44fc-b839-8e114323162e" containerID="57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f" exitCode=0
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.183740 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d002bfa2-970b-44fc-b839-8e114323162e","Type":"ContainerDied","Data":"57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f"}
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.183775 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d002bfa2-970b-44fc-b839-8e114323162e","Type":"ContainerDied","Data":"2d236b04befb235c2913dc07358dcaac8eefe34367627de30cb431f5aefa1e87"}
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.183826 4910 scope.go:117] "RemoveContainer" containerID="57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.183982 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.204531 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-d4c6-account-create-update-4bczb" event={"ID":"fb48d3cb-07fa-4be7-bbb2-8af493a83edf","Type":"ContainerStarted","Data":"56ce33239df4389259c16204e59d07a81c460822f7d19ea02b3a267d08780f3a"}
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.208892 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6575886cb7-hv9qm" event={"ID":"8028bd01-f5f2-4c20-9f51-c6a7e06571fd","Type":"ContainerStarted","Data":"0b57f06bd560d7ea459ad7894434e0cb56c2c083f9d37bde2a3695556689aed0"}
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.208936 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6575886cb7-hv9qm" event={"ID":"8028bd01-f5f2-4c20-9f51-c6a7e06571fd","Type":"ContainerStarted","Data":"524ed6219d5a142353f0f864e3d8dbeb3ea8c10701cb536c8aa9935e0ebc71a2"}
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.229509 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-combined-ca-bundle\") pod \"d002bfa2-970b-44fc-b839-8e114323162e\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") "
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.229543 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89h7x\" (UniqueName: \"kubernetes.io/projected/d002bfa2-970b-44fc-b839-8e114323162e-kube-api-access-89h7x\") pod \"d002bfa2-970b-44fc-b839-8e114323162e\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") "
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.229599 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-scripts\") pod \"d002bfa2-970b-44fc-b839-8e114323162e\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") "
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.229737 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-httpd-run\") pod \"d002bfa2-970b-44fc-b839-8e114323162e\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") "
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.229783 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-logs\") pod \"d002bfa2-970b-44fc-b839-8e114323162e\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") "
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.229773 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3555-account-create-update-4wx9j" event={"ID":"d8187b53-8402-4ae3-b580-5afa43f29e9f","Type":"ContainerStarted","Data":"e72efc6d386a0a844a6066db17474b506b571b83a588d2479b71f4b63625832d"}
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.229806 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-public-tls-certs\") pod \"d002bfa2-970b-44fc-b839-8e114323162e\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") "
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.229842 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-config-data\") pod \"d002bfa2-970b-44fc-b839-8e114323162e\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") "
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.229861 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"d002bfa2-970b-44fc-b839-8e114323162e\" (UID: \"d002bfa2-970b-44fc-b839-8e114323162e\") "
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.231108 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d002bfa2-970b-44fc-b839-8e114323162e" (UID: "d002bfa2-970b-44fc-b839-8e114323162e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.232358 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-logs" (OuterVolumeSpecName: "logs") pod "d002bfa2-970b-44fc-b839-8e114323162e" (UID: "d002bfa2-970b-44fc-b839-8e114323162e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.241659 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-d4c6-account-create-update-4bczb" podStartSLOduration=8.241630904 podStartE2EDuration="8.241630904s" podCreationTimestamp="2025-11-25 21:48:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:48.225584169 +0000 UTC m=+1083.688060501" watchObservedRunningTime="2025-11-25 21:48:48.241630904 +0000 UTC m=+1083.704107226"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.248188 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "d002bfa2-970b-44fc-b839-8e114323162e" (UID: "d002bfa2-970b-44fc-b839-8e114323162e"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.260254 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d002bfa2-970b-44fc-b839-8e114323162e-kube-api-access-89h7x" (OuterVolumeSpecName: "kube-api-access-89h7x") pod "d002bfa2-970b-44fc-b839-8e114323162e" (UID: "d002bfa2-970b-44fc-b839-8e114323162e"). InnerVolumeSpecName "kube-api-access-89h7x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.260457 4910 scope.go:117] "RemoveContainer" containerID="46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.260664 4910 generic.go:334] "Generic (PLEG): container finished" podID="e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80" containerID="427cb77a54c6e69a974e8845021f58e8fe311a2d9eb4e2236253dbb18173858f" exitCode=0
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.262418 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fns6z" event={"ID":"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80","Type":"ContainerDied","Data":"427cb77a54c6e69a974e8845021f58e8fe311a2d9eb4e2236253dbb18173858f"}
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.274044 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-sr4sn" event={"ID":"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70","Type":"ContainerStarted","Data":"34cae767d7c53b22041356fd6c63ea50c31a7833801a5d2a639ab77d021aba31"}
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.306547 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4dff-account-create-update-b7gr5" event={"ID":"41176817-8b80-4a07-832e-3957be57cf82","Type":"ContainerStarted","Data":"5b6355ce859db2972832c6cf53b1bd1aad9f41eec778230b108da7ee32408d4b"}
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.314127 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-scripts" (OuterVolumeSpecName: "scripts") pod "d002bfa2-970b-44fc-b839-8e114323162e" (UID: "d002bfa2-970b-44fc-b839-8e114323162e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.331375 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-78559" event={"ID":"3b417522-64b1-43ad-84e9-19795c605ebf","Type":"ContainerStarted","Data":"328d173f0b4414fb83dcd47c83b8e7cc6508063c640779c31ee1e7d7c79e664a"}
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.344267 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-3555-account-create-update-4wx9j" podStartSLOduration=8.344204296000001 podStartE2EDuration="8.344204296s" podCreationTimestamp="2025-11-25 21:48:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:48.248085606 +0000 UTC m=+1083.710561938" watchObservedRunningTime="2025-11-25 21:48:48.344204296 +0000 UTC m=+1083.806680658"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.353450 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d002bfa2-970b-44fc-b839-8e114323162e" (UID: "d002bfa2-970b-44fc-b839-8e114323162e"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.354416 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d002bfa2-970b-44fc-b839-8e114323162e" (UID: "d002bfa2-970b-44fc-b839-8e114323162e").
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.362459 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-config-data" (OuterVolumeSpecName: "config-data") pod "d002bfa2-970b-44fc-b839-8e114323162e" (UID: "d002bfa2-970b-44fc-b839-8e114323162e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.407853 4910 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.407889 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d002bfa2-970b-44fc-b839-8e114323162e-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.407898 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.407924 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.407958 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.407982 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.407992 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89h7x\" (UniqueName: \"kubernetes.io/projected/d002bfa2-970b-44fc-b839-8e114323162e-kube-api-access-89h7x\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.408001 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d002bfa2-970b-44fc-b839-8e114323162e-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.440888 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-4dff-account-create-update-b7gr5" podStartSLOduration=8.44085202 podStartE2EDuration="8.44085202s" podCreationTimestamp="2025-11-25 21:48:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:48.3322858 +0000 UTC m=+1083.794762132" watchObservedRunningTime="2025-11-25 21:48:48.44085202 +0000 UTC m=+1083.903328332" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.450778 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.452029 4910 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/nova-api-db-create-78559" podStartSLOduration=8.452001326 podStartE2EDuration="8.452001326s" podCreationTimestamp="2025-11-25 21:48:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:48.360688853 +0000 UTC m=+1083.823165175" watchObservedRunningTime="2025-11-25 21:48:48.452001326 +0000 UTC m=+1083.914477648" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.474592 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.495031 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.510056 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:48 crc kubenswrapper[4910]: W1125 21:48:48.592395 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf56ecdbd_f9cb_4de2_bfbd_382ee3dddd7c.slice/crio-82ad0a93a7aedc77e586d06fdc330252bbe273ec9675e54053c9e6ea6a3a62d9 WatchSource:0}: Error finding container 82ad0a93a7aedc77e586d06fdc330252bbe273ec9675e54053c9e6ea6a3a62d9: Status 404 returned error can't find the container with id 82ad0a93a7aedc77e586d06fdc330252bbe273ec9675e54053c9e6ea6a3a62d9 Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.623041 4910 scope.go:117] "RemoveContainer" containerID="57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f" Nov 25 21:48:48 crc kubenswrapper[4910]: E1125 21:48:48.623518 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f\": container with ID starting with 57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f not found: ID does not exist" containerID="57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.623584 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f"} err="failed to get container status \"57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f\": rpc error: code = NotFound desc = could not find container \"57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f\": container with ID starting with 57c4b90923fcd286b2e85e9646169a796ec2a28e4b29788bf5fb116a32864d6f not found: ID does not exist" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.623623 4910 scope.go:117] "RemoveContainer" containerID="46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e" Nov 25 21:48:48 crc kubenswrapper[4910]: E1125 21:48:48.624205 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e\": container with ID starting with 46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e not found: ID does not exist" containerID="46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.624254 4910 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e"} err="failed to get container status \"46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e\": rpc error: code = NotFound desc = could not find container \"46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e\": container with ID starting with 46411f46a843a066b53eef3ba79cdb632929ba2f24e36df33d1eabbe80b9565e not found: ID does not exist" Nov 25 21:48:48 crc kubenswrapper[4910]: W1125 21:48:48.627938 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb9542bd3_e5b7_44e2_84bb_11b34d1fc44b.slice/crio-5dd41c3c2851ffee3ba1233f853faaa95fde38de3ddae8bdff1fecba5089f44f WatchSource:0}: Error finding container 5dd41c3c2851ffee3ba1233f853faaa95fde38de3ddae8bdff1fecba5089f44f: Status 404 returned error can't find the container with id 5dd41c3c2851ffee3ba1233f853faaa95fde38de3ddae8bdff1fecba5089f44f Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.647441 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.658148 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.687181 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 25 21:48:48 crc kubenswrapper[4910]: E1125 21:48:48.687592 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d002bfa2-970b-44fc-b839-8e114323162e" containerName="glance-log" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.687610 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d002bfa2-970b-44fc-b839-8e114323162e" containerName="glance-log" Nov 25 21:48:48 crc kubenswrapper[4910]: E1125 21:48:48.687652 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d002bfa2-970b-44fc-b839-8e114323162e" containerName="glance-httpd" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.687660 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d002bfa2-970b-44fc-b839-8e114323162e" containerName="glance-httpd" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.687854 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d002bfa2-970b-44fc-b839-8e114323162e" containerName="glance-log" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.687877 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d002bfa2-970b-44fc-b839-8e114323162e" containerName="glance-httpd" Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.688981 4910 util.go:30] "No sandbox for pod can be found. 
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.693938 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.694263 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.743065 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.825618 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.826065 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.826154 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-config-data\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.826189 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.826258 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-scripts\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.826280 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-logs\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.826707 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hbj98\" (UniqueName: \"kubernetes.io/projected/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-kube-api-access-hbj98\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.826762 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.928274 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-config-data\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.928342 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.928391 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-scripts\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.928410 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-logs\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.928440 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hbj98\" (UniqueName: \"kubernetes.io/projected/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-kube-api-access-hbj98\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.928463 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.928521 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.928542 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.928792 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.928838 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.929712 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-logs\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.941615 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-scripts\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.943229 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.943399 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-config-data\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.948725 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hbj98\" (UniqueName: \"kubernetes.io/projected/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-kube-api-access-hbj98\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.951021 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b3e95ca-7b13-4baf-98f1-465aa3b31a2c-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:48 crc kubenswrapper[4910]: I1125 21:48:48.963279 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-external-api-0\" (UID: \"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c\") " pod="openstack/glance-default-external-api-0"
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.022940 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.235740 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d002bfa2-970b-44fc-b839-8e114323162e" path="/var/lib/kubelet/pods/d002bfa2-970b-44fc-b839-8e114323162e/volumes"
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.397785 4910 generic.go:334] "Generic (PLEG): container finished" podID="41176817-8b80-4a07-832e-3957be57cf82" containerID="5b6355ce859db2972832c6cf53b1bd1aad9f41eec778230b108da7ee32408d4b" exitCode=0
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.397850 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4dff-account-create-update-b7gr5" event={"ID":"41176817-8b80-4a07-832e-3957be57cf82","Type":"ContainerDied","Data":"5b6355ce859db2972832c6cf53b1bd1aad9f41eec778230b108da7ee32408d4b"}
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.404329 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-6575886cb7-hv9qm" event={"ID":"8028bd01-f5f2-4c20-9f51-c6a7e06571fd","Type":"ContainerStarted","Data":"00d81ed9e92ba721ee9bd0a0559a36b7a0e9cc478b626b21a87f075f1b3eb76b"}
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.405289 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6575886cb7-hv9qm"
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.405314 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-6575886cb7-hv9qm"
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.411625 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c","Type":"ContainerStarted","Data":"82ad0a93a7aedc77e586d06fdc330252bbe273ec9675e54053c9e6ea6a3a62d9"}
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.430177 4910 generic.go:334] "Generic (PLEG): container finished" podID="d012ed8c-8195-4ea9-b3e3-4a3e750e8d70" containerID="34cae767d7c53b22041356fd6c63ea50c31a7833801a5d2a639ab77d021aba31" exitCode=0
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.430251 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-sr4sn" event={"ID":"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70","Type":"ContainerDied","Data":"34cae767d7c53b22041356fd6c63ea50c31a7833801a5d2a639ab77d021aba31"}
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.439523 4910 generic.go:334] "Generic (PLEG): container finished" podID="3b417522-64b1-43ad-84e9-19795c605ebf" containerID="328d173f0b4414fb83dcd47c83b8e7cc6508063c640779c31ee1e7d7c79e664a" exitCode=0
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.440816 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-78559" event={"ID":"3b417522-64b1-43ad-84e9-19795c605ebf","Type":"ContainerDied","Data":"328d173f0b4414fb83dcd47c83b8e7cc6508063c640779c31ee1e7d7c79e664a"}
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.458489 4910 generic.go:334] "Generic (PLEG): container finished" podID="fb48d3cb-07fa-4be7-bbb2-8af493a83edf" containerID="56ce33239df4389259c16204e59d07a81c460822f7d19ea02b3a267d08780f3a" exitCode=0
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.458763 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-d4c6-account-create-update-4bczb" event={"ID":"fb48d3cb-07fa-4be7-bbb2-8af493a83edf","Type":"ContainerDied","Data":"56ce33239df4389259c16204e59d07a81c460822f7d19ea02b3a267d08780f3a"}
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.482878 4910 generic.go:334] "Generic (PLEG): container finished" podID="d8187b53-8402-4ae3-b580-5afa43f29e9f" containerID="e72efc6d386a0a844a6066db17474b506b571b83a588d2479b71f4b63625832d" exitCode=0
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.483015 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3555-account-create-update-4wx9j" event={"ID":"d8187b53-8402-4ae3-b580-5afa43f29e9f","Type":"ContainerDied","Data":"e72efc6d386a0a844a6066db17474b506b571b83a588d2479b71f4b63625832d"}
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.485689 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-6575886cb7-hv9qm" podStartSLOduration=13.485672562 podStartE2EDuration="13.485672562s" podCreationTimestamp="2025-11-25 21:48:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:49.44866246 +0000 UTC m=+1084.911138782" watchObservedRunningTime="2025-11-25 21:48:49.485672562 +0000 UTC m=+1084.948148884"
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.487067 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b","Type":"ContainerStarted","Data":"5dd41c3c2851ffee3ba1233f853faaa95fde38de3ddae8bdff1fecba5089f44f"}
Nov 25 21:48:49 crc kubenswrapper[4910]: I1125 21:48:49.562546 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 25 21:48:49 crc kubenswrapper[4910]: W1125 21:48:49.567433 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5b3e95ca_7b13_4baf_98f1_465aa3b31a2c.slice/crio-07556eed7d239545b1f2a2fe638055f5abdc487b76345c287747c1cad964eec6 WatchSource:0}: Error finding container 07556eed7d239545b1f2a2fe638055f5abdc487b76345c287747c1cad964eec6: Status 404 returned error can't find the container with id 07556eed7d239545b1f2a2fe638055f5abdc487b76345c287747c1cad964eec6
Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.093191 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fns6z"
Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.177142 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dnskh\" (UniqueName: \"kubernetes.io/projected/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-kube-api-access-dnskh\") pod \"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80\" (UID: \"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80\") "
Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.177303 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-operator-scripts\") pod \"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80\" (UID: \"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80\") "
Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.178190 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80" (UID: "e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.190514 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-kube-api-access-dnskh" (OuterVolumeSpecName: "kube-api-access-dnskh") pod "e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80" (UID: "e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80"). InnerVolumeSpecName "kube-api-access-dnskh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.280004 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dnskh\" (UniqueName: \"kubernetes.io/projected/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-kube-api-access-dnskh\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.280054 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.340470 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-sr4sn"
Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.497094 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5tcc\" (UniqueName: \"kubernetes.io/projected/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-kube-api-access-j5tcc\") pod \"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70\" (UID: \"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70\") "
Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.499091 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-operator-scripts\") pod \"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70\" (UID: \"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70\") "
Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.500955 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d012ed8c-8195-4ea9-b3e3-4a3e750e8d70" (UID: "d012ed8c-8195-4ea9-b3e3-4a3e750e8d70"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.526258 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.553563 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-kube-api-access-j5tcc" (OuterVolumeSpecName: "kube-api-access-j5tcc") pod "d012ed8c-8195-4ea9-b3e3-4a3e750e8d70" (UID: "d012ed8c-8195-4ea9-b3e3-4a3e750e8d70"). InnerVolumeSpecName "kube-api-access-j5tcc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.622782 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b","Type":"ContainerStarted","Data":"6d5767a210c29862d0b4b1a4d19f0d9ef0355e5cb8fa9c59cce877b1824ef1bf"} Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.630003 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5tcc\" (UniqueName: \"kubernetes.io/projected/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70-kube-api-access-j5tcc\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.630077 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c","Type":"ContainerStarted","Data":"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef"} Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.630109 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c","Type":"ContainerStarted","Data":"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3"} Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.696809 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-sr4sn" event={"ID":"d012ed8c-8195-4ea9-b3e3-4a3e750e8d70","Type":"ContainerDied","Data":"87ac66e80a44617c3870b7f6794614c893f7ed13757fa31dccbcb61fb67a3082"} Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.696863 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="87ac66e80a44617c3870b7f6794614c893f7ed13757fa31dccbcb61fb67a3082" Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.696964 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-sr4sn" Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.739688 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c","Type":"ContainerStarted","Data":"07556eed7d239545b1f2a2fe638055f5abdc487b76345c287747c1cad964eec6"} Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.759362 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-fns6z" event={"ID":"e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80","Type":"ContainerDied","Data":"5b89439fbafe5053560a8baa371d9a33a03d46e4e038a12d89a9f18a45eb2177"} Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.759434 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b89439fbafe5053560a8baa371d9a33a03d46e4e038a12d89a9f18a45eb2177" Nov 25 21:48:50 crc kubenswrapper[4910]: I1125 21:48:50.759513 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-fns6z" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.621851 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4dff-account-create-update-b7gr5" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.629979 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r556x\" (UniqueName: \"kubernetes.io/projected/41176817-8b80-4a07-832e-3957be57cf82-kube-api-access-r556x\") pod \"41176817-8b80-4a07-832e-3957be57cf82\" (UID: \"41176817-8b80-4a07-832e-3957be57cf82\") " Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.630186 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41176817-8b80-4a07-832e-3957be57cf82-operator-scripts\") pod \"41176817-8b80-4a07-832e-3957be57cf82\" (UID: \"41176817-8b80-4a07-832e-3957be57cf82\") " Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.648139 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41176817-8b80-4a07-832e-3957be57cf82-kube-api-access-r556x" (OuterVolumeSpecName: "kube-api-access-r556x") pod "41176817-8b80-4a07-832e-3957be57cf82" (UID: "41176817-8b80-4a07-832e-3957be57cf82"). InnerVolumeSpecName "kube-api-access-r556x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.657183 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/41176817-8b80-4a07-832e-3957be57cf82-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "41176817-8b80-4a07-832e-3957be57cf82" (UID: "41176817-8b80-4a07-832e-3957be57cf82"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.732117 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/41176817-8b80-4a07-832e-3957be57cf82-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.732601 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r556x\" (UniqueName: \"kubernetes.io/projected/41176817-8b80-4a07-832e-3957be57cf82-kube-api-access-r556x\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.780688 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-d4c6-account-create-update-4bczb" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.799434 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-78559" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.799659 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-4dff-account-create-update-b7gr5" event={"ID":"41176817-8b80-4a07-832e-3957be57cf82","Type":"ContainerDied","Data":"d4398e39eda834a61a761dd8fa2b877045d09620cba653bb4f30b881993b3b4d"} Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.799692 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d4398e39eda834a61a761dd8fa2b877045d09620cba653bb4f30b881993b3b4d" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.799731 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-4dff-account-create-update-b7gr5" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.817813 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3555-account-create-update-4wx9j" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.818923 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-78559" event={"ID":"3b417522-64b1-43ad-84e9-19795c605ebf","Type":"ContainerDied","Data":"fde7b64e424d59dc686cbab6dab8e92a486510a04e085997ef4207011a05d4d0"} Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.818964 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fde7b64e424d59dc686cbab6dab8e92a486510a04e085997ef4207011a05d4d0" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.819049 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-78559" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.823761 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-d4c6-account-create-update-4bczb" event={"ID":"fb48d3cb-07fa-4be7-bbb2-8af493a83edf","Type":"ContainerDied","Data":"2a0d23f4475a6e34ce00a9dcd60f9b2b0a14eaee5914bc27a7088b1c52b9288e"} Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.823797 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2a0d23f4475a6e34ce00a9dcd60f9b2b0a14eaee5914bc27a7088b1c52b9288e" Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.823863 4910 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.852210 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c","Type":"ContainerStarted","Data":"f36961145dbecdd52c8a6238e1d3b1ea611ccbe1c113c7555c636f0c52513f23"}
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.856030 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-3555-account-create-update-4wx9j" event={"ID":"d8187b53-8402-4ae3-b580-5afa43f29e9f","Type":"ContainerDied","Data":"7bbb00aba4524c1f2fc8e889ca54b4fb8da936d56e74253a8b4ac7cdfc0bf964"}
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.856068 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7bbb00aba4524c1f2fc8e889ca54b4fb8da936d56e74253a8b4ac7cdfc0bf964"
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.856081 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-3555-account-create-update-4wx9j"
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.878393 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"b9542bd3-e5b7-44e2-84bb-11b34d1fc44b","Type":"ContainerStarted","Data":"87d8fe9698ad4658e2092cc84c1acede112104d110c79a788e39cf593840723e"}
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.938429 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4t6m\" (UniqueName: \"kubernetes.io/projected/d8187b53-8402-4ae3-b580-5afa43f29e9f-kube-api-access-x4t6m\") pod \"d8187b53-8402-4ae3-b580-5afa43f29e9f\" (UID: \"d8187b53-8402-4ae3-b580-5afa43f29e9f\") "
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.939208 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46mmf\" (UniqueName: \"kubernetes.io/projected/3b417522-64b1-43ad-84e9-19795c605ebf-kube-api-access-46mmf\") pod \"3b417522-64b1-43ad-84e9-19795c605ebf\" (UID: \"3b417522-64b1-43ad-84e9-19795c605ebf\") "
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.939797 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b417522-64b1-43ad-84e9-19795c605ebf-operator-scripts\") pod \"3b417522-64b1-43ad-84e9-19795c605ebf\" (UID: \"3b417522-64b1-43ad-84e9-19795c605ebf\") "
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.939915 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdgrn\" (UniqueName: \"kubernetes.io/projected/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-kube-api-access-hdgrn\") pod \"fb48d3cb-07fa-4be7-bbb2-8af493a83edf\" (UID: \"fb48d3cb-07fa-4be7-bbb2-8af493a83edf\") "
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.940212 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8187b53-8402-4ae3-b580-5afa43f29e9f-operator-scripts\") pod \"d8187b53-8402-4ae3-b580-5afa43f29e9f\" (UID: \"d8187b53-8402-4ae3-b580-5afa43f29e9f\") "
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.940326 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-operator-scripts\") pod \"fb48d3cb-07fa-4be7-bbb2-8af493a83edf\" (UID: \"fb48d3cb-07fa-4be7-bbb2-8af493a83edf\") "
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.940663 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b417522-64b1-43ad-84e9-19795c605ebf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3b417522-64b1-43ad-84e9-19795c605ebf" (UID: "3b417522-64b1-43ad-84e9-19795c605ebf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.941074 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fb48d3cb-07fa-4be7-bbb2-8af493a83edf" (UID: "fb48d3cb-07fa-4be7-bbb2-8af493a83edf"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.943426 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3b417522-64b1-43ad-84e9-19795c605ebf-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.943632 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.943840 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8187b53-8402-4ae3-b580-5afa43f29e9f-kube-api-access-x4t6m" (OuterVolumeSpecName: "kube-api-access-x4t6m") pod "d8187b53-8402-4ae3-b580-5afa43f29e9f" (UID: "d8187b53-8402-4ae3-b580-5afa43f29e9f"). InnerVolumeSpecName "kube-api-access-x4t6m". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.944191 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8187b53-8402-4ae3-b580-5afa43f29e9f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d8187b53-8402-4ae3-b580-5afa43f29e9f" (UID: "d8187b53-8402-4ae3-b580-5afa43f29e9f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.944647 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b417522-64b1-43ad-84e9-19795c605ebf-kube-api-access-46mmf" (OuterVolumeSpecName: "kube-api-access-46mmf") pod "3b417522-64b1-43ad-84e9-19795c605ebf" (UID: "3b417522-64b1-43ad-84e9-19795c605ebf"). InnerVolumeSpecName "kube-api-access-46mmf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:48:51 crc kubenswrapper[4910]: I1125 21:48:51.944774 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-kube-api-access-hdgrn" (OuterVolumeSpecName: "kube-api-access-hdgrn") pod "fb48d3cb-07fa-4be7-bbb2-8af493a83edf" (UID: "fb48d3cb-07fa-4be7-bbb2-8af493a83edf"). InnerVolumeSpecName "kube-api-access-hdgrn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:48:52 crc kubenswrapper[4910]: I1125 21:48:52.047118 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4t6m\" (UniqueName: \"kubernetes.io/projected/d8187b53-8402-4ae3-b580-5afa43f29e9f-kube-api-access-x4t6m\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:52 crc kubenswrapper[4910]: I1125 21:48:52.047161 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46mmf\" (UniqueName: \"kubernetes.io/projected/3b417522-64b1-43ad-84e9-19795c605ebf-kube-api-access-46mmf\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:52 crc kubenswrapper[4910]: I1125 21:48:52.047174 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdgrn\" (UniqueName: \"kubernetes.io/projected/fb48d3cb-07fa-4be7-bbb2-8af493a83edf-kube-api-access-hdgrn\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:52 crc kubenswrapper[4910]: I1125 21:48:52.047187 4910 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d8187b53-8402-4ae3-b580-5afa43f29e9f-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:52 crc kubenswrapper[4910]: I1125 21:48:52.658569 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.658545006 podStartE2EDuration="6.658545006s" podCreationTimestamp="2025-11-25 21:48:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:51.90227987 +0000 UTC m=+1087.364756212" watchObservedRunningTime="2025-11-25 21:48:52.658545006 +0000 UTC m=+1088.121021328"
Nov 25 21:48:52 crc kubenswrapper[4910]: I1125 21:48:52.887593 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c","Type":"ContainerStarted","Data":"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d"}
Nov 25 21:48:52 crc kubenswrapper[4910]: I1125 21:48:52.889079 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"5b3e95ca-7b13-4baf-98f1-465aa3b31a2c","Type":"ContainerStarted","Data":"59267d101c7e1d36ef0c4acfc733a40e845f9a6aebb343aa5fa4f60763e08451"}
Nov 25 21:48:52 crc kubenswrapper[4910]: I1125 21:48:52.891720 4910 generic.go:334] "Generic (PLEG): container finished" podID="78dc494b-f987-443a-a350-1988639b6fee" containerID="3d1388e3f9936a5e6838117a86dbadfac66d243bb6179a027f0f0a595cdb55ae" exitCode=137
Nov 25 21:48:52 crc kubenswrapper[4910]: I1125 21:48:52.892194 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58f8d7cc56-csk7l" event={"ID":"78dc494b-f987-443a-a350-1988639b6fee","Type":"ContainerDied","Data":"3d1388e3f9936a5e6838117a86dbadfac66d243bb6179a027f0f0a595cdb55ae"}
Nov 25 21:48:52 crc kubenswrapper[4910]: I1125 21:48:52.911846 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.911822946 podStartE2EDuration="4.911822946s" podCreationTimestamp="2025-11-25 21:48:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:48:52.907293956 +0000 UTC m=+1088.369770278" watchObservedRunningTime="2025-11-25 21:48:52.911822946 +0000 UTC m=+1088.374299258"
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.098686 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
21:48:53.098686 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.098763 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.638831 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-58f8d7cc56-csk7l"
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.802501 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-config-data\") pod \"78dc494b-f987-443a-a350-1988639b6fee\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") "
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.802818 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-scripts\") pod \"78dc494b-f987-443a-a350-1988639b6fee\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") "
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.802838 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-combined-ca-bundle\") pod \"78dc494b-f987-443a-a350-1988639b6fee\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") "
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.803046 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-secret-key\") pod \"78dc494b-f987-443a-a350-1988639b6fee\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") "
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.803618 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vpgsz\" (UniqueName: \"kubernetes.io/projected/78dc494b-f987-443a-a350-1988639b6fee-kube-api-access-vpgsz\") pod \"78dc494b-f987-443a-a350-1988639b6fee\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") "
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.803720 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/78dc494b-f987-443a-a350-1988639b6fee-logs\") pod \"78dc494b-f987-443a-a350-1988639b6fee\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") "
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.803756 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-tls-certs\") pod \"78dc494b-f987-443a-a350-1988639b6fee\" (UID: \"78dc494b-f987-443a-a350-1988639b6fee\") "
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.804664 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78dc494b-f987-443a-a350-1988639b6fee-logs" (OuterVolumeSpecName: "logs") pod "78dc494b-f987-443a-a350-1988639b6fee" (UID: "78dc494b-f987-443a-a350-1988639b6fee"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.809459 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "78dc494b-f987-443a-a350-1988639b6fee" (UID: "78dc494b-f987-443a-a350-1988639b6fee"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.828786 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78dc494b-f987-443a-a350-1988639b6fee-kube-api-access-vpgsz" (OuterVolumeSpecName: "kube-api-access-vpgsz") pod "78dc494b-f987-443a-a350-1988639b6fee" (UID: "78dc494b-f987-443a-a350-1988639b6fee"). InnerVolumeSpecName "kube-api-access-vpgsz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.830915 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-scripts" (OuterVolumeSpecName: "scripts") pod "78dc494b-f987-443a-a350-1988639b6fee" (UID: "78dc494b-f987-443a-a350-1988639b6fee"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.836618 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "78dc494b-f987-443a-a350-1988639b6fee" (UID: "78dc494b-f987-443a-a350-1988639b6fee"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.863920 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-config-data" (OuterVolumeSpecName: "config-data") pod "78dc494b-f987-443a-a350-1988639b6fee" (UID: "78dc494b-f987-443a-a350-1988639b6fee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.865520 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "78dc494b-f987-443a-a350-1988639b6fee" (UID: "78dc494b-f987-443a-a350-1988639b6fee"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.902148 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-58f8d7cc56-csk7l" event={"ID":"78dc494b-f987-443a-a350-1988639b6fee","Type":"ContainerDied","Data":"3abf372eb70ee21335b052fff6640229310346d6931e7d3ce88098687041568e"}
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.902206 4910 scope.go:117] "RemoveContainer" containerID="3a6d55cf981774e9e32bdc5ee3a9fbb53ace068261c91c2d651a817d4ca4dc1f"
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.902396 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-58f8d7cc56-csk7l"
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.905574 4910 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.905748 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.906588 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78dc494b-f987-443a-a350-1988639b6fee-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.906604 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.906614 4910 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/78dc494b-f987-443a-a350-1988639b6fee-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.906632 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vpgsz\" (UniqueName: \"kubernetes.io/projected/78dc494b-f987-443a-a350-1988639b6fee-kube-api-access-vpgsz\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.906643 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/78dc494b-f987-443a-a350-1988639b6fee-logs\") on node \"crc\" DevicePath \"\""
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.909318 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c","Type":"ContainerStarted","Data":"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3"}
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.942482 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.207555693 podStartE2EDuration="6.942459871s" podCreationTimestamp="2025-11-25 21:48:47 +0000 UTC" firstStartedPulling="2025-11-25 21:48:48.600040924 +0000 UTC m=+1084.062517246" lastFinishedPulling="2025-11-25 21:48:53.334945102 +0000 UTC m=+1088.797421424" observedRunningTime="2025-11-25 21:48:53.932494406 +0000 UTC m=+1089.394970728" watchObservedRunningTime="2025-11-25 21:48:53.942459871 +0000 UTC m=+1089.404936193"
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.970317 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-58f8d7cc56-csk7l"]
Nov 25 21:48:53 crc kubenswrapper[4910]: I1125 21:48:53.979112 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-58f8d7cc56-csk7l"]
Nov 25 21:48:54 crc kubenswrapper[4910]: I1125 21:48:54.083504 4910 scope.go:117] "RemoveContainer" containerID="3d1388e3f9936a5e6838117a86dbadfac66d243bb6179a027f0f0a595cdb55ae"
Nov 25 21:48:54 crc kubenswrapper[4910]: I1125 21:48:54.925482 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.216054 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78dc494b-f987-443a-a350-1988639b6fee" path="/var/lib/kubelet/pods/78dc494b-f987-443a-a350-1988639b6fee/volumes"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.783972 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-gswkn"]
Nov 25 21:48:55 crc kubenswrapper[4910]: E1125 21:48:55.784468 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80" containerName="mariadb-database-create"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784489 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80" containerName="mariadb-database-create"
Nov 25 21:48:55 crc kubenswrapper[4910]: E1125 21:48:55.784522 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8187b53-8402-4ae3-b580-5afa43f29e9f" containerName="mariadb-account-create-update"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784529 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8187b53-8402-4ae3-b580-5afa43f29e9f" containerName="mariadb-account-create-update"
Nov 25 21:48:55 crc kubenswrapper[4910]: E1125 21:48:55.784540 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d012ed8c-8195-4ea9-b3e3-4a3e750e8d70" containerName="mariadb-database-create"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784549 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d012ed8c-8195-4ea9-b3e3-4a3e750e8d70" containerName="mariadb-database-create"
Nov 25 21:48:55 crc kubenswrapper[4910]: E1125 21:48:55.784562 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb48d3cb-07fa-4be7-bbb2-8af493a83edf" containerName="mariadb-account-create-update"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784569 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb48d3cb-07fa-4be7-bbb2-8af493a83edf" containerName="mariadb-account-create-update"
Nov 25 21:48:55 crc kubenswrapper[4910]: E1125 21:48:55.784593 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b417522-64b1-43ad-84e9-19795c605ebf" containerName="mariadb-database-create"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784602 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b417522-64b1-43ad-84e9-19795c605ebf" containerName="mariadb-database-create"
Nov 25 21:48:55 crc kubenswrapper[4910]: E1125 21:48:55.784617 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784624 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon"
Nov 25 21:48:55 crc kubenswrapper[4910]: E1125 21:48:55.784643 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41176817-8b80-4a07-832e-3957be57cf82" containerName="mariadb-account-create-update"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784651 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="41176817-8b80-4a07-832e-3957be57cf82" containerName="mariadb-account-create-update"
Nov 25 21:48:55 crc kubenswrapper[4910]: E1125 21:48:55.784677 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon-log"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784684 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon-log"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784887 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d012ed8c-8195-4ea9-b3e3-4a3e750e8d70" containerName="mariadb-database-create"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784913 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80" containerName="mariadb-database-create"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784934 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon-log"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784949 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8187b53-8402-4ae3-b580-5afa43f29e9f" containerName="mariadb-account-create-update"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784961 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="fb48d3cb-07fa-4be7-bbb2-8af493a83edf" containerName="mariadb-account-create-update"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784974 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="78dc494b-f987-443a-a350-1988639b6fee" containerName="horizon"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784987 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b417522-64b1-43ad-84e9-19795c605ebf" containerName="mariadb-database-create"
Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.784998 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="41176817-8b80-4a07-832e-3957be57cf82" containerName="mariadb-account-create-update"
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.788319 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-hzngl" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.791176 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.802916 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-gswkn"] Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.803327 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.857754 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7b7r\" (UniqueName: \"kubernetes.io/projected/499e86de-9121-4084-b380-6bf87d8f4881-kube-api-access-z7b7r\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.857922 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-config-data\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.857991 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-scripts\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.858028 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.976095 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z7b7r\" (UniqueName: \"kubernetes.io/projected/499e86de-9121-4084-b380-6bf87d8f4881-kube-api-access-z7b7r\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.976351 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-config-data\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.976401 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-scripts\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: 
\"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.976423 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:55 crc kubenswrapper[4910]: I1125 21:48:55.989820 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-config-data\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:56 crc kubenswrapper[4910]: I1125 21:48:56.000362 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:56 crc kubenswrapper[4910]: I1125 21:48:56.006201 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-scripts\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:56 crc kubenswrapper[4910]: I1125 21:48:56.025927 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7b7r\" (UniqueName: \"kubernetes.io/projected/499e86de-9121-4084-b380-6bf87d8f4881-kube-api-access-z7b7r\") pod \"nova-cell0-conductor-db-sync-gswkn\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") " pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:56 crc kubenswrapper[4910]: I1125 21:48:56.104342 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-gswkn" Nov 25 21:48:56 crc kubenswrapper[4910]: I1125 21:48:56.408893 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:48:56 crc kubenswrapper[4910]: W1125 21:48:56.774124 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod499e86de_9121_4084_b380_6bf87d8f4881.slice/crio-2d788d522f2f2b68e2bdc4c3d8f586e517a9a6044e6cef06027f7bd4c50fe83b WatchSource:0}: Error finding container 2d788d522f2f2b68e2bdc4c3d8f586e517a9a6044e6cef06027f7bd4c50fe83b: Status 404 returned error can't find the container with id 2d788d522f2f2b68e2bdc4c3d8f586e517a9a6044e6cef06027f7bd4c50fe83b Nov 25 21:48:56 crc kubenswrapper[4910]: I1125 21:48:56.783335 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-gswkn"] Nov 25 21:48:56 crc kubenswrapper[4910]: I1125 21:48:56.953356 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-gswkn" event={"ID":"499e86de-9121-4084-b380-6bf87d8f4881","Type":"ContainerStarted","Data":"2d788d522f2f2b68e2bdc4c3d8f586e517a9a6044e6cef06027f7bd4c50fe83b"} Nov 25 21:48:56 crc kubenswrapper[4910]: I1125 21:48:56.953676 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="sg-core" containerID="cri-o://e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d" gracePeriod=30 Nov 25 21:48:56 crc kubenswrapper[4910]: I1125 21:48:56.953717 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="ceilometer-notification-agent" containerID="cri-o://732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef" gracePeriod=30 Nov 25 21:48:56 crc kubenswrapper[4910]: I1125 21:48:56.953696 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="proxy-httpd" containerID="cri-o://463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3" gracePeriod=30 Nov 25 21:48:56 crc kubenswrapper[4910]: I1125 21:48:56.953649 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="ceilometer-central-agent" containerID="cri-o://daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3" gracePeriod=30 Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.380183 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.384864 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-6575886cb7-hv9qm" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.636280 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.636340 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.698254 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openstack/glance-default-internal-api-0" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.736444 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.786710 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.826364 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-run-httpd\") pod \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.826583 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-log-httpd\") pod \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.826622 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-config-data\") pod \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.826644 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-scripts\") pod \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.826680 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w28q\" (UniqueName: \"kubernetes.io/projected/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-kube-api-access-2w28q\") pod \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.826735 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-combined-ca-bundle\") pod \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.826806 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-sg-core-conf-yaml\") pod \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\" (UID: \"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c\") " Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.832424 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" (UID: "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.832779 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" (UID: "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.843481 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-kube-api-access-2w28q" (OuterVolumeSpecName: "kube-api-access-2w28q") pod "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" (UID: "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c"). InnerVolumeSpecName "kube-api-access-2w28q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.843838 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-scripts" (OuterVolumeSpecName: "scripts") pod "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" (UID: "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.871222 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" (UID: "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.920542 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" (UID: "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.933603 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.933700 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w28q\" (UniqueName: \"kubernetes.io/projected/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-kube-api-access-2w28q\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.933724 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.933744 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.933760 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.933808 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.979054 4910 generic.go:334] "Generic (PLEG): container finished" podID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerID="463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3" exitCode=0 Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.979092 4910 generic.go:334] "Generic (PLEG): container finished" podID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerID="e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d" exitCode=2 Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.979121 4910 generic.go:334] "Generic (PLEG): container finished" podID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerID="732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef" exitCode=0 Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.979129 4910 generic.go:334] "Generic (PLEG): container finished" podID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerID="daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3" exitCode=0 Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.986316 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.987147 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c","Type":"ContainerDied","Data":"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3"} Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.987212 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.987228 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c","Type":"ContainerDied","Data":"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d"} Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.987259 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c","Type":"ContainerDied","Data":"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef"} Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.987275 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.987283 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c","Type":"ContainerDied","Data":"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3"} Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.987292 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c","Type":"ContainerDied","Data":"82ad0a93a7aedc77e586d06fdc330252bbe273ec9675e54053c9e6ea6a3a62d9"} Nov 25 21:48:57 crc kubenswrapper[4910]: I1125 21:48:57.987310 4910 scope.go:117] "RemoveContainer" containerID="463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.037625 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-config-data" (OuterVolumeSpecName: "config-data") pod "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" (UID: "f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.119761 4910 scope.go:117] "RemoveContainer" containerID="e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.137376 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.182391 4910 scope.go:117] "RemoveContainer" containerID="732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.224398 4910 scope.go:117] "RemoveContainer" containerID="daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.271926 4910 scope.go:117] "RemoveContainer" containerID="463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3" Nov 25 21:48:58 crc kubenswrapper[4910]: E1125 21:48:58.272692 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3\": container with ID starting with 463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3 not found: ID does not exist" containerID="463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.272728 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3"} err="failed to get container status \"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3\": rpc error: code = NotFound desc = could not find container \"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3\": container with ID starting with 463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3 not found: ID does not exist" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.272753 4910 scope.go:117] "RemoveContainer" containerID="e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d" Nov 25 21:48:58 crc kubenswrapper[4910]: E1125 21:48:58.273482 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d\": container with ID starting with e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d not found: ID does not exist" containerID="e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.273514 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d"} err="failed to get container status \"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d\": rpc error: code = NotFound desc = could not find container \"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d\": container with ID starting with e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d not found: ID does not exist" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.273535 4910 scope.go:117] "RemoveContainer" containerID="732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef" Nov 25 21:48:58 crc kubenswrapper[4910]: E1125 
Nov 25 21:48:58 crc kubenswrapper[4910]: E1125 21:48:58.273902 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef\": container with ID starting with 732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef not found: ID does not exist" containerID="732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.273934 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef"} err="failed to get container status \"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef\": rpc error: code = NotFound desc = could not find container \"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef\": container with ID starting with 732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.273955 4910 scope.go:117] "RemoveContainer" containerID="daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3"
Nov 25 21:48:58 crc kubenswrapper[4910]: E1125 21:48:58.274591 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3\": container with ID starting with daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3 not found: ID does not exist" containerID="daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.274627 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3"} err="failed to get container status \"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3\": rpc error: code = NotFound desc = could not find container \"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3\": container with ID starting with daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3 not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.274649 4910 scope.go:117] "RemoveContainer" containerID="463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.276075 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3"} err="failed to get container status \"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3\": rpc error: code = NotFound desc = could not find container \"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3\": container with ID starting with 463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3 not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.276140 4910 scope.go:117] "RemoveContainer" containerID="e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.276573 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d"} err="failed to get container status \"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d\": rpc error: code = NotFound desc = could not find container \"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d\": container with ID starting with e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.276592 4910 scope.go:117] "RemoveContainer" containerID="732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.277665 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef"} err="failed to get container status \"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef\": rpc error: code = NotFound desc = could not find container \"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef\": container with ID starting with 732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.277726 4910 scope.go:117] "RemoveContainer" containerID="daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.279056 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3"} err="failed to get container status \"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3\": rpc error: code = NotFound desc = could not find container \"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3\": container with ID starting with daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3 not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.279093 4910 scope.go:117] "RemoveContainer" containerID="463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.279333 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3"} err="failed to get container status \"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3\": rpc error: code = NotFound desc = could not find container \"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3\": container with ID starting with 463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3 not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.279547 4910 scope.go:117] "RemoveContainer" containerID="e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.281352 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d"} err="failed to get container status \"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d\": rpc error: code = NotFound desc = could not find container \"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d\": container with ID starting with e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.281438 4910 scope.go:117] "RemoveContainer" containerID="732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.281840 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef"} err="failed to get container status \"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef\": rpc error: code = NotFound desc = could not find container \"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef\": container with ID starting with 732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.281867 4910 scope.go:117] "RemoveContainer" containerID="daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.283639 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3"} err="failed to get container status \"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3\": rpc error: code = NotFound desc = could not find container \"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3\": container with ID starting with daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3 not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.283665 4910 scope.go:117] "RemoveContainer" containerID="463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.283883 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3"} err="failed to get container status \"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3\": rpc error: code = NotFound desc = could not find container \"463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3\": container with ID starting with 463d1450d976466da5559cae70bc43a63dc5af96d12265587ce7e475130148b3 not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.283903 4910 scope.go:117] "RemoveContainer" containerID="e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.284462 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d"} err="failed to get container status \"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d\": rpc error: code = NotFound desc = could not find container \"e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d\": container with ID starting with e5333bbb4b12bc662afcc6bca37d48813fb153051a127e8c4fda9c04b3896a2d not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.284484 4910 scope.go:117] "RemoveContainer" containerID="732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.285406 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef"} err="failed to get container status \"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef\": rpc error: code = NotFound desc = could not find container \"732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef\": container with ID starting with 732afee8ddcc215e176963c74b6869a379b5a2721b440cca3787f51b1ff80eef not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.285436 4910 scope.go:117] "RemoveContainer" containerID="daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.285686 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3"} err="failed to get container status \"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3\": rpc error: code = NotFound desc = could not find container \"daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3\": container with ID starting with daa82880c35e8f1f288c438a273404bb979c3ab24c0d99251ec51074142a85c3 not found: ID does not exist"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.338521 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.357049 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.370717 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 21:48:58 crc kubenswrapper[4910]: E1125 21:48:58.371400 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="ceilometer-central-agent"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.371429 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="ceilometer-central-agent"
Nov 25 21:48:58 crc kubenswrapper[4910]: E1125 21:48:58.371463 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="sg-core"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.371473 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="sg-core"
Nov 25 21:48:58 crc kubenswrapper[4910]: E1125 21:48:58.371485 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="proxy-httpd"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.371494 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="proxy-httpd"
Nov 25 21:48:58 crc kubenswrapper[4910]: E1125 21:48:58.371524 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="ceilometer-notification-agent"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.371533 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="ceilometer-notification-agent"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.371788 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="proxy-httpd"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.371830 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="ceilometer-central-agent"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.371849 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="sg-core"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.371865 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" containerName="ceilometer-notification-agent"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.376491 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.380211 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.380350 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.380493 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.447037 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-run-httpd\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.447128 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqfrx\" (UniqueName: \"kubernetes.io/projected/b4dcc472-63c8-464f-8cc9-a854300f4458-kube-api-access-qqfrx\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.447205 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-scripts\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.447252 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.447353 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-config-data\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.447460 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-log-httpd\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.447550 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.550646 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-log-httpd\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.550736 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.550871 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-run-httpd\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.550911 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqfrx\" (UniqueName: \"kubernetes.io/projected/b4dcc472-63c8-464f-8cc9-a854300f4458-kube-api-access-qqfrx\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.550961 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-scripts\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.550988 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.551025 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-config-data\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.551228 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-log-httpd\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.551433 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-run-httpd\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.558057 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0"
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.559131 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-scripts\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.560214 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-config-data\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.589129 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqfrx\" (UniqueName: \"kubernetes.io/projected/b4dcc472-63c8-464f-8cc9-a854300f4458-kube-api-access-qqfrx\") pod \"ceilometer-0\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " pod="openstack/ceilometer-0" Nov 25 21:48:58 crc kubenswrapper[4910]: I1125 21:48:58.737625 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:48:59 crc kubenswrapper[4910]: I1125 21:48:59.023288 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 21:48:59 crc kubenswrapper[4910]: I1125 21:48:59.023878 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 25 21:48:59 crc kubenswrapper[4910]: I1125 21:48:59.069801 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 21:48:59 crc kubenswrapper[4910]: I1125 21:48:59.085260 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 25 21:48:59 crc kubenswrapper[4910]: I1125 21:48:59.222480 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c" path="/var/lib/kubelet/pods/f56ecdbd-f9cb-4de2-bfbd-382ee3dddd7c/volumes" Nov 25 21:48:59 crc kubenswrapper[4910]: I1125 21:48:59.312268 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:49:00 crc kubenswrapper[4910]: I1125 21:49:00.055400 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4dcc472-63c8-464f-8cc9-a854300f4458","Type":"ContainerStarted","Data":"127b7e7efe2c0ede5b2cdd23d9af856f3e9bb317233d7db4915430ad14e53668"} Nov 25 21:49:00 crc kubenswrapper[4910]: I1125 21:49:00.056022 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 21:49:00 crc kubenswrapper[4910]: I1125 21:49:00.056041 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 21:49:00 crc kubenswrapper[4910]: I1125 21:49:00.057610 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 21:49:00 crc kubenswrapper[4910]: I1125 21:49:00.057725 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 25 21:49:00 crc kubenswrapper[4910]: I1125 21:49:00.388433 
4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 21:49:00 crc kubenswrapper[4910]: I1125 21:49:00.396765 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 25 21:49:01 crc kubenswrapper[4910]: I1125 21:49:01.070176 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4dcc472-63c8-464f-8cc9-a854300f4458","Type":"ContainerStarted","Data":"b206e1ed75c1d3a21d9f511183b83546762b9eeb204eafa61e26fef50296c718"} Nov 25 21:49:01 crc kubenswrapper[4910]: I1125 21:49:01.070825 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4dcc472-63c8-464f-8cc9-a854300f4458","Type":"ContainerStarted","Data":"7fdc1d6a7ae0b279f32b318f90f6ad0d8af6130f11e0bd2b5cb81924049d9dc2"} Nov 25 21:49:02 crc kubenswrapper[4910]: I1125 21:49:02.082261 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4dcc472-63c8-464f-8cc9-a854300f4458","Type":"ContainerStarted","Data":"108d7aed134ed308c0e5b1b87dccf06757d09d8407c4f1aede35818f1f078140"} Nov 25 21:49:02 crc kubenswrapper[4910]: I1125 21:49:02.082653 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 21:49:02 crc kubenswrapper[4910]: I1125 21:49:02.082667 4910 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 25 21:49:02 crc kubenswrapper[4910]: I1125 21:49:02.109296 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:49:02 crc kubenswrapper[4910]: I1125 21:49:02.587537 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 21:49:02 crc kubenswrapper[4910]: I1125 21:49:02.880669 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 25 21:49:10 crc kubenswrapper[4910]: I1125 21:49:10.155554 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-gswkn" event={"ID":"499e86de-9121-4084-b380-6bf87d8f4881","Type":"ContainerStarted","Data":"ac5fe85faf6d03a30df279c68be256f02f6dc342b7d5c492aa000ab21c321844"} Nov 25 21:49:10 crc kubenswrapper[4910]: I1125 21:49:10.161506 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4dcc472-63c8-464f-8cc9-a854300f4458","Type":"ContainerStarted","Data":"f2eff9ca2d690397a087e89412ac80f764261e485f8ce031b3229952101e11f9"} Nov 25 21:49:10 crc kubenswrapper[4910]: I1125 21:49:10.161662 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="ceilometer-central-agent" containerID="cri-o://7fdc1d6a7ae0b279f32b318f90f6ad0d8af6130f11e0bd2b5cb81924049d9dc2" gracePeriod=30 Nov 25 21:49:10 crc kubenswrapper[4910]: I1125 21:49:10.161877 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 21:49:10 crc kubenswrapper[4910]: I1125 21:49:10.161926 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="proxy-httpd" containerID="cri-o://f2eff9ca2d690397a087e89412ac80f764261e485f8ce031b3229952101e11f9" gracePeriod=30 Nov 25 21:49:10 crc kubenswrapper[4910]: I1125 21:49:10.161969 4910 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="sg-core" containerID="cri-o://108d7aed134ed308c0e5b1b87dccf06757d09d8407c4f1aede35818f1f078140" gracePeriod=30 Nov 25 21:49:10 crc kubenswrapper[4910]: I1125 21:49:10.162007 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="ceilometer-notification-agent" containerID="cri-o://b206e1ed75c1d3a21d9f511183b83546762b9eeb204eafa61e26fef50296c718" gracePeriod=30 Nov 25 21:49:10 crc kubenswrapper[4910]: I1125 21:49:10.178321 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-gswkn" podStartSLOduration=3.0124425439999998 podStartE2EDuration="15.178299975s" podCreationTimestamp="2025-11-25 21:48:55 +0000 UTC" firstStartedPulling="2025-11-25 21:48:56.77628179 +0000 UTC m=+1092.238758112" lastFinishedPulling="2025-11-25 21:49:08.942139211 +0000 UTC m=+1104.404615543" observedRunningTime="2025-11-25 21:49:10.176505927 +0000 UTC m=+1105.638982249" watchObservedRunningTime="2025-11-25 21:49:10.178299975 +0000 UTC m=+1105.640776297" Nov 25 21:49:10 crc kubenswrapper[4910]: I1125 21:49:10.212485 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.641840078 podStartE2EDuration="12.212462411s" podCreationTimestamp="2025-11-25 21:48:58 +0000 UTC" firstStartedPulling="2025-11-25 21:48:59.371310152 +0000 UTC m=+1094.833786474" lastFinishedPulling="2025-11-25 21:49:08.941932475 +0000 UTC m=+1104.404408807" observedRunningTime="2025-11-25 21:49:10.203666808 +0000 UTC m=+1105.666143130" watchObservedRunningTime="2025-11-25 21:49:10.212462411 +0000 UTC m=+1105.674938733" Nov 25 21:49:11 crc kubenswrapper[4910]: I1125 21:49:11.177515 4910 generic.go:334] "Generic (PLEG): container finished" podID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerID="f2eff9ca2d690397a087e89412ac80f764261e485f8ce031b3229952101e11f9" exitCode=0 Nov 25 21:49:11 crc kubenswrapper[4910]: I1125 21:49:11.178024 4910 generic.go:334] "Generic (PLEG): container finished" podID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerID="108d7aed134ed308c0e5b1b87dccf06757d09d8407c4f1aede35818f1f078140" exitCode=2 Nov 25 21:49:11 crc kubenswrapper[4910]: I1125 21:49:11.178034 4910 generic.go:334] "Generic (PLEG): container finished" podID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerID="b206e1ed75c1d3a21d9f511183b83546762b9eeb204eafa61e26fef50296c718" exitCode=0 Nov 25 21:49:11 crc kubenswrapper[4910]: I1125 21:49:11.177613 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4dcc472-63c8-464f-8cc9-a854300f4458","Type":"ContainerDied","Data":"f2eff9ca2d690397a087e89412ac80f764261e485f8ce031b3229952101e11f9"} Nov 25 21:49:11 crc kubenswrapper[4910]: I1125 21:49:11.178161 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4dcc472-63c8-464f-8cc9-a854300f4458","Type":"ContainerDied","Data":"108d7aed134ed308c0e5b1b87dccf06757d09d8407c4f1aede35818f1f078140"} Nov 25 21:49:11 crc kubenswrapper[4910]: I1125 21:49:11.178181 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4dcc472-63c8-464f-8cc9-a854300f4458","Type":"ContainerDied","Data":"b206e1ed75c1d3a21d9f511183b83546762b9eeb204eafa61e26fef50296c718"} Nov 25 21:49:12 
crc kubenswrapper[4910]: I1125 21:49:12.191481 4910 generic.go:334] "Generic (PLEG): container finished" podID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerID="7fdc1d6a7ae0b279f32b318f90f6ad0d8af6130f11e0bd2b5cb81924049d9dc2" exitCode=0 Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.191552 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4dcc472-63c8-464f-8cc9-a854300f4458","Type":"ContainerDied","Data":"7fdc1d6a7ae0b279f32b318f90f6ad0d8af6130f11e0bd2b5cb81924049d9dc2"} Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.191877 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b4dcc472-63c8-464f-8cc9-a854300f4458","Type":"ContainerDied","Data":"127b7e7efe2c0ede5b2cdd23d9af856f3e9bb317233d7db4915430ad14e53668"} Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.191900 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="127b7e7efe2c0ede5b2cdd23d9af856f3e9bb317233d7db4915430ad14e53668" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.240455 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.292277 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-sg-core-conf-yaml\") pod \"b4dcc472-63c8-464f-8cc9-a854300f4458\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.292334 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-combined-ca-bundle\") pod \"b4dcc472-63c8-464f-8cc9-a854300f4458\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.292463 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqfrx\" (UniqueName: \"kubernetes.io/projected/b4dcc472-63c8-464f-8cc9-a854300f4458-kube-api-access-qqfrx\") pod \"b4dcc472-63c8-464f-8cc9-a854300f4458\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.292493 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-scripts\") pod \"b4dcc472-63c8-464f-8cc9-a854300f4458\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.292646 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-log-httpd\") pod \"b4dcc472-63c8-464f-8cc9-a854300f4458\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.292672 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-config-data\") pod \"b4dcc472-63c8-464f-8cc9-a854300f4458\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.292700 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-run-httpd\") pod \"b4dcc472-63c8-464f-8cc9-a854300f4458\" (UID: \"b4dcc472-63c8-464f-8cc9-a854300f4458\") " Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.294448 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b4dcc472-63c8-464f-8cc9-a854300f4458" (UID: "b4dcc472-63c8-464f-8cc9-a854300f4458"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.295027 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b4dcc472-63c8-464f-8cc9-a854300f4458" (UID: "b4dcc472-63c8-464f-8cc9-a854300f4458"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.331605 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-scripts" (OuterVolumeSpecName: "scripts") pod "b4dcc472-63c8-464f-8cc9-a854300f4458" (UID: "b4dcc472-63c8-464f-8cc9-a854300f4458"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.331728 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4dcc472-63c8-464f-8cc9-a854300f4458-kube-api-access-qqfrx" (OuterVolumeSpecName: "kube-api-access-qqfrx") pod "b4dcc472-63c8-464f-8cc9-a854300f4458" (UID: "b4dcc472-63c8-464f-8cc9-a854300f4458"). InnerVolumeSpecName "kube-api-access-qqfrx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.357350 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b4dcc472-63c8-464f-8cc9-a854300f4458" (UID: "b4dcc472-63c8-464f-8cc9-a854300f4458"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.396376 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqfrx\" (UniqueName: \"kubernetes.io/projected/b4dcc472-63c8-464f-8cc9-a854300f4458-kube-api-access-qqfrx\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.396431 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.396449 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.396464 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b4dcc472-63c8-464f-8cc9-a854300f4458-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.396476 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.427964 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4dcc472-63c8-464f-8cc9-a854300f4458" (UID: "b4dcc472-63c8-464f-8cc9-a854300f4458"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.451357 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-config-data" (OuterVolumeSpecName: "config-data") pod "b4dcc472-63c8-464f-8cc9-a854300f4458" (UID: "b4dcc472-63c8-464f-8cc9-a854300f4458"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.505557 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:12 crc kubenswrapper[4910]: I1125 21:49:12.505606 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4dcc472-63c8-464f-8cc9-a854300f4458-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.223534 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.278250 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.292851 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.306026 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:49:13 crc kubenswrapper[4910]: E1125 21:49:13.306584 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="sg-core" Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.306606 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="sg-core" Nov 25 21:49:13 crc kubenswrapper[4910]: E1125 21:49:13.306625 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="ceilometer-notification-agent" Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.306633 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="ceilometer-notification-agent" Nov 25 21:49:13 crc kubenswrapper[4910]: E1125 21:49:13.306650 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="proxy-httpd" Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.306657 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="proxy-httpd" Nov 25 21:49:13 crc kubenswrapper[4910]: E1125 21:49:13.306688 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="ceilometer-central-agent" Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.306695 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="ceilometer-central-agent" Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.306927 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="ceilometer-central-agent" Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.306944 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="proxy-httpd" Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.306955 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="ceilometer-notification-agent" Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.306973 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" containerName="sg-core" Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.308918 4910 util.go:30] "No sandbox for pod can be found. 
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.313372 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.313401 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.315222 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.430183 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvsn4\" (UniqueName: \"kubernetes.io/projected/2c4cd93c-8303-4924-86f3-94593b6377cd-kube-api-access-zvsn4\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.430288 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-log-httpd\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.430327 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.430354 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.430375 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-scripts\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.430422 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-run-httpd\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.430519 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-config-data\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.532404 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-config-data\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.532742 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvsn4\" (UniqueName: \"kubernetes.io/projected/2c4cd93c-8303-4924-86f3-94593b6377cd-kube-api-access-zvsn4\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.532860 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-log-httpd\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.533016 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.533602 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.533748 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-scripts\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.533593 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-log-httpd\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.534887 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-run-httpd\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.535831 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-run-httpd\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.541303 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.543324 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.545731 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-config-data\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.548076 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-scripts\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.562167 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvsn4\" (UniqueName: \"kubernetes.io/projected/2c4cd93c-8303-4924-86f3-94593b6377cd-kube-api-access-zvsn4\") pod \"ceilometer-0\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " pod="openstack/ceilometer-0"
Nov 25 21:49:13 crc kubenswrapper[4910]: I1125 21:49:13.647622 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 25 21:49:14 crc kubenswrapper[4910]: W1125 21:49:14.197729 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c4cd93c_8303_4924_86f3_94593b6377cd.slice/crio-c7e3271968c4b4c880d5438bb84c6396db894d78da4e67ebf3a4e5d1b73a3b2d WatchSource:0}: Error finding container c7e3271968c4b4c880d5438bb84c6396db894d78da4e67ebf3a4e5d1b73a3b2d: Status 404 returned error can't find the container with id c7e3271968c4b4c880d5438bb84c6396db894d78da4e67ebf3a4e5d1b73a3b2d
Nov 25 21:49:14 crc kubenswrapper[4910]: I1125 21:49:14.206020 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 21:49:14 crc kubenswrapper[4910]: I1125 21:49:14.234211 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c4cd93c-8303-4924-86f3-94593b6377cd","Type":"ContainerStarted","Data":"c7e3271968c4b4c880d5438bb84c6396db894d78da4e67ebf3a4e5d1b73a3b2d"}
Nov 25 21:49:15 crc kubenswrapper[4910]: I1125 21:49:15.232073 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4dcc472-63c8-464f-8cc9-a854300f4458" path="/var/lib/kubelet/pods/b4dcc472-63c8-464f-8cc9-a854300f4458/volumes"
Nov 25 21:49:15 crc kubenswrapper[4910]: I1125 21:49:15.265056 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c4cd93c-8303-4924-86f3-94593b6377cd","Type":"ContainerStarted","Data":"82a8bf4d8ed9c98f9561b3bafd6c4a1600d40d02c497bf0315a54cf35861e99a"}
Nov 25 21:49:16 crc kubenswrapper[4910]: I1125 21:49:16.279133 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c4cd93c-8303-4924-86f3-94593b6377cd","Type":"ContainerStarted","Data":"40579dbfffb21f5db6d73e14169fda342be996e925484c7d7063627265800774"}
Nov 25 21:49:17 crc kubenswrapper[4910]: I1125 21:49:17.300068 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c4cd93c-8303-4924-86f3-94593b6377cd","Type":"ContainerStarted","Data":"70efc7cacc2e18a81f4f291dfe55322cb009e7e88bd02816e9ebe8c680ec3b68"}
Nov 25 21:49:18 crc kubenswrapper[4910]: I1125 21:49:18.354570 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c4cd93c-8303-4924-86f3-94593b6377cd","Type":"ContainerStarted","Data":"2ca95f25c4faafca84618e0d90621fa55fbd1ced49f263d7587e4ef915631752"}
Nov 25 21:49:18 crc kubenswrapper[4910]: I1125 21:49:18.355029 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 25 21:49:23 crc kubenswrapper[4910]: I1125 21:49:23.099237 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 21:49:23 crc kubenswrapper[4910]: I1125 21:49:23.100019 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 21:49:23 crc kubenswrapper[4910]: I1125 21:49:23.100092 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t"
Nov 25 21:49:23 crc kubenswrapper[4910]: I1125 21:49:23.100953 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"0b0066f1a169222bc8e764ac54716c8dcd57922f8eb880531d5e609e43cc685c"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 21:49:23 crc kubenswrapper[4910]: I1125 21:49:23.101015 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://0b0066f1a169222bc8e764ac54716c8dcd57922f8eb880531d5e609e43cc685c" gracePeriod=600
Nov 25 21:49:23 crc kubenswrapper[4910]: I1125 21:49:23.409146 4910 generic.go:334] "Generic (PLEG): container finished" podID="499e86de-9121-4084-b380-6bf87d8f4881" containerID="ac5fe85faf6d03a30df279c68be256f02f6dc342b7d5c492aa000ab21c321844" exitCode=0
Nov 25 21:49:23 crc kubenswrapper[4910]: I1125 21:49:23.409216 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-gswkn" event={"ID":"499e86de-9121-4084-b380-6bf87d8f4881","Type":"ContainerDied","Data":"ac5fe85faf6d03a30df279c68be256f02f6dc342b7d5c492aa000ab21c321844"}
Nov 25 21:49:23 crc kubenswrapper[4910]: I1125 21:49:23.412690 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"0b0066f1a169222bc8e764ac54716c8dcd57922f8eb880531d5e609e43cc685c"}
Nov 25 21:49:23 crc kubenswrapper[4910]: I1125 21:49:23.412749 4910 scope.go:117] "RemoveContainer" containerID="8368a57726af5a6b75ce9b9efb9fa3828db0cba5637cfb1aba6ea91ccf50acb2"
Nov 25 21:49:23 crc kubenswrapper[4910]: I1125 21:49:23.413389 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="0b0066f1a169222bc8e764ac54716c8dcd57922f8eb880531d5e609e43cc685c" exitCode=0
Nov 25 21:49:23 crc kubenswrapper[4910]: I1125 21:49:23.429765 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=6.758783081 podStartE2EDuration="10.429742776s" podCreationTimestamp="2025-11-25 21:49:13 +0000 UTC" firstStartedPulling="2025-11-25 21:49:14.200341678 +0000 UTC m=+1109.662818000" lastFinishedPulling="2025-11-25 21:49:17.871301373 +0000 UTC m=+1113.333777695" observedRunningTime="2025-11-25 21:49:18.401615045 +0000 UTC m=+1113.864091367" watchObservedRunningTime="2025-11-25 21:49:23.429742776 +0000 UTC m=+1118.892219098"
Nov 25 21:49:24 crc kubenswrapper[4910]: I1125 21:49:24.428765 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"7b6cad0a631fec9eb58a49d3ce7f8c662dc70b5fec077e96f2c93ef3235ee8bf"}
Nov 25 21:49:24 crc kubenswrapper[4910]: I1125 21:49:24.800406 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-gswkn"
Nov 25 21:49:24 crc kubenswrapper[4910]: I1125 21:49:24.903523 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7b7r\" (UniqueName: \"kubernetes.io/projected/499e86de-9121-4084-b380-6bf87d8f4881-kube-api-access-z7b7r\") pod \"499e86de-9121-4084-b380-6bf87d8f4881\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") "
Nov 25 21:49:24 crc kubenswrapper[4910]: I1125 21:49:24.903602 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-combined-ca-bundle\") pod \"499e86de-9121-4084-b380-6bf87d8f4881\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") "
Nov 25 21:49:24 crc kubenswrapper[4910]: I1125 21:49:24.903629 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-scripts\") pod \"499e86de-9121-4084-b380-6bf87d8f4881\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") "
Nov 25 21:49:24 crc kubenswrapper[4910]: I1125 21:49:24.903721 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-config-data\") pod \"499e86de-9121-4084-b380-6bf87d8f4881\" (UID: \"499e86de-9121-4084-b380-6bf87d8f4881\") "
Nov 25 21:49:24 crc kubenswrapper[4910]: I1125 21:49:24.912358 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/499e86de-9121-4084-b380-6bf87d8f4881-kube-api-access-z7b7r" (OuterVolumeSpecName: "kube-api-access-z7b7r") pod "499e86de-9121-4084-b380-6bf87d8f4881" (UID: "499e86de-9121-4084-b380-6bf87d8f4881"). InnerVolumeSpecName "kube-api-access-z7b7r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:49:24 crc kubenswrapper[4910]: I1125 21:49:24.912883 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-scripts" (OuterVolumeSpecName: "scripts") pod "499e86de-9121-4084-b380-6bf87d8f4881" (UID: "499e86de-9121-4084-b380-6bf87d8f4881"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:49:24 crc kubenswrapper[4910]: I1125 21:49:24.944553 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "499e86de-9121-4084-b380-6bf87d8f4881" (UID: "499e86de-9121-4084-b380-6bf87d8f4881"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:49:24 crc kubenswrapper[4910]: I1125 21:49:24.947720 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-config-data" (OuterVolumeSpecName: "config-data") pod "499e86de-9121-4084-b380-6bf87d8f4881" (UID: "499e86de-9121-4084-b380-6bf87d8f4881"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.006886 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.006945 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7b7r\" (UniqueName: \"kubernetes.io/projected/499e86de-9121-4084-b380-6bf87d8f4881-kube-api-access-z7b7r\") on node \"crc\" DevicePath \"\""
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.006964 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.006977 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/499e86de-9121-4084-b380-6bf87d8f4881-scripts\") on node \"crc\" DevicePath \"\""
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.441963 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-gswkn" event={"ID":"499e86de-9121-4084-b380-6bf87d8f4881","Type":"ContainerDied","Data":"2d788d522f2f2b68e2bdc4c3d8f586e517a9a6044e6cef06027f7bd4c50fe83b"}
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.442510 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d788d522f2f2b68e2bdc4c3d8f586e517a9a6044e6cef06027f7bd4c50fe83b"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.442208 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-gswkn"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.659839 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 25 21:49:25 crc kubenswrapper[4910]: E1125 21:49:25.660352 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="499e86de-9121-4084-b380-6bf87d8f4881" containerName="nova-cell0-conductor-db-sync"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.660372 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="499e86de-9121-4084-b380-6bf87d8f4881" containerName="nova-cell0-conductor-db-sync"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.660563 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="499e86de-9121-4084-b380-6bf87d8f4881" containerName="nova-cell0-conductor-db-sync"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.661188 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.667442 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.667667 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-hzngl"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.683578 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.722842 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/533ec0e8-93b1-4468-8b7f-72071aa8be27-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"533ec0e8-93b1-4468-8b7f-72071aa8be27\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.722911 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/533ec0e8-93b1-4468-8b7f-72071aa8be27-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"533ec0e8-93b1-4468-8b7f-72071aa8be27\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.722955 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9lt9\" (UniqueName: \"kubernetes.io/projected/533ec0e8-93b1-4468-8b7f-72071aa8be27-kube-api-access-f9lt9\") pod \"nova-cell0-conductor-0\" (UID: \"533ec0e8-93b1-4468-8b7f-72071aa8be27\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.824592 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/533ec0e8-93b1-4468-8b7f-72071aa8be27-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"533ec0e8-93b1-4468-8b7f-72071aa8be27\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.824656 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/533ec0e8-93b1-4468-8b7f-72071aa8be27-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"533ec0e8-93b1-4468-8b7f-72071aa8be27\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.824694 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9lt9\" (UniqueName: \"kubernetes.io/projected/533ec0e8-93b1-4468-8b7f-72071aa8be27-kube-api-access-f9lt9\") pod \"nova-cell0-conductor-0\" (UID: \"533ec0e8-93b1-4468-8b7f-72071aa8be27\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.831606 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/533ec0e8-93b1-4468-8b7f-72071aa8be27-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"533ec0e8-93b1-4468-8b7f-72071aa8be27\") " pod="openstack/nova-cell0-conductor-0"
Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.832081 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/533ec0e8-93b1-4468-8b7f-72071aa8be27-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"533ec0e8-93b1-4468-8b7f-72071aa8be27\") " pod="openstack/nova-cell0-conductor-0"
(UID: \"533ec0e8-93b1-4468-8b7f-72071aa8be27\") " pod="openstack/nova-cell0-conductor-0" Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.845131 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9lt9\" (UniqueName: \"kubernetes.io/projected/533ec0e8-93b1-4468-8b7f-72071aa8be27-kube-api-access-f9lt9\") pod \"nova-cell0-conductor-0\" (UID: \"533ec0e8-93b1-4468-8b7f-72071aa8be27\") " pod="openstack/nova-cell0-conductor-0" Nov 25 21:49:25 crc kubenswrapper[4910]: I1125 21:49:25.977657 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 25 21:49:26 crc kubenswrapper[4910]: I1125 21:49:26.476212 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 25 21:49:26 crc kubenswrapper[4910]: W1125 21:49:26.482386 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod533ec0e8_93b1_4468_8b7f_72071aa8be27.slice/crio-7cb14c0553406d5cc9c3d9465c72a00e8fa33f1df1992ec08950df02d85e5568 WatchSource:0}: Error finding container 7cb14c0553406d5cc9c3d9465c72a00e8fa33f1df1992ec08950df02d85e5568: Status 404 returned error can't find the container with id 7cb14c0553406d5cc9c3d9465c72a00e8fa33f1df1992ec08950df02d85e5568 Nov 25 21:49:26 crc kubenswrapper[4910]: I1125 21:49:26.633490 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:49:26 crc kubenswrapper[4910]: I1125 21:49:26.633936 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="ceilometer-central-agent" containerID="cri-o://82a8bf4d8ed9c98f9561b3bafd6c4a1600d40d02c497bf0315a54cf35861e99a" gracePeriod=30 Nov 25 21:49:26 crc kubenswrapper[4910]: I1125 21:49:26.634013 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="proxy-httpd" containerID="cri-o://2ca95f25c4faafca84618e0d90621fa55fbd1ced49f263d7587e4ef915631752" gracePeriod=30 Nov 25 21:49:26 crc kubenswrapper[4910]: I1125 21:49:26.634051 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="sg-core" containerID="cri-o://70efc7cacc2e18a81f4f291dfe55322cb009e7e88bd02816e9ebe8c680ec3b68" gracePeriod=30 Nov 25 21:49:26 crc kubenswrapper[4910]: I1125 21:49:26.634099 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="ceilometer-notification-agent" containerID="cri-o://40579dbfffb21f5db6d73e14169fda342be996e925484c7d7063627265800774" gracePeriod=30 Nov 25 21:49:27 crc kubenswrapper[4910]: I1125 21:49:27.462462 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"533ec0e8-93b1-4468-8b7f-72071aa8be27","Type":"ContainerStarted","Data":"29ed6044092d5c2811a1f6699f36fee8baff2ca32732467180266e314e010c4f"} Nov 25 21:49:27 crc kubenswrapper[4910]: I1125 21:49:27.462956 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"533ec0e8-93b1-4468-8b7f-72071aa8be27","Type":"ContainerStarted","Data":"7cb14c0553406d5cc9c3d9465c72a00e8fa33f1df1992ec08950df02d85e5568"} Nov 25 21:49:27 crc kubenswrapper[4910]: 
I1125 21:49:27.464346 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 25 21:49:27 crc kubenswrapper[4910]: I1125 21:49:27.476302 4910 generic.go:334] "Generic (PLEG): container finished" podID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerID="2ca95f25c4faafca84618e0d90621fa55fbd1ced49f263d7587e4ef915631752" exitCode=0 Nov 25 21:49:27 crc kubenswrapper[4910]: I1125 21:49:27.476351 4910 generic.go:334] "Generic (PLEG): container finished" podID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerID="70efc7cacc2e18a81f4f291dfe55322cb009e7e88bd02816e9ebe8c680ec3b68" exitCode=2 Nov 25 21:49:27 crc kubenswrapper[4910]: I1125 21:49:27.476363 4910 generic.go:334] "Generic (PLEG): container finished" podID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerID="82a8bf4d8ed9c98f9561b3bafd6c4a1600d40d02c497bf0315a54cf35861e99a" exitCode=0 Nov 25 21:49:27 crc kubenswrapper[4910]: I1125 21:49:27.476384 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c4cd93c-8303-4924-86f3-94593b6377cd","Type":"ContainerDied","Data":"2ca95f25c4faafca84618e0d90621fa55fbd1ced49f263d7587e4ef915631752"} Nov 25 21:49:27 crc kubenswrapper[4910]: I1125 21:49:27.476415 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c4cd93c-8303-4924-86f3-94593b6377cd","Type":"ContainerDied","Data":"70efc7cacc2e18a81f4f291dfe55322cb009e7e88bd02816e9ebe8c680ec3b68"} Nov 25 21:49:27 crc kubenswrapper[4910]: I1125 21:49:27.476426 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c4cd93c-8303-4924-86f3-94593b6377cd","Type":"ContainerDied","Data":"82a8bf4d8ed9c98f9561b3bafd6c4a1600d40d02c497bf0315a54cf35861e99a"} Nov 25 21:49:27 crc kubenswrapper[4910]: I1125 21:49:27.493230 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.493208148 podStartE2EDuration="2.493208148s" podCreationTimestamp="2025-11-25 21:49:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:49:27.483917031 +0000 UTC m=+1122.946393363" watchObservedRunningTime="2025-11-25 21:49:27.493208148 +0000 UTC m=+1122.955684470" Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.490922 4910 generic.go:334] "Generic (PLEG): container finished" podID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerID="40579dbfffb21f5db6d73e14169fda342be996e925484c7d7063627265800774" exitCode=0 Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.491022 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c4cd93c-8303-4924-86f3-94593b6377cd","Type":"ContainerDied","Data":"40579dbfffb21f5db6d73e14169fda342be996e925484c7d7063627265800774"} Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.850826 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.901584 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-config-data\") pod \"2c4cd93c-8303-4924-86f3-94593b6377cd\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.901954 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-sg-core-conf-yaml\") pod \"2c4cd93c-8303-4924-86f3-94593b6377cd\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.902185 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-log-httpd\") pod \"2c4cd93c-8303-4924-86f3-94593b6377cd\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.902344 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-scripts\") pod \"2c4cd93c-8303-4924-86f3-94593b6377cd\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.903168 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-run-httpd\") pod \"2c4cd93c-8303-4924-86f3-94593b6377cd\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.903289 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-combined-ca-bundle\") pod \"2c4cd93c-8303-4924-86f3-94593b6377cd\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.903088 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "2c4cd93c-8303-4924-86f3-94593b6377cd" (UID: "2c4cd93c-8303-4924-86f3-94593b6377cd"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.903615 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvsn4\" (UniqueName: \"kubernetes.io/projected/2c4cd93c-8303-4924-86f3-94593b6377cd-kube-api-access-zvsn4\") pod \"2c4cd93c-8303-4924-86f3-94593b6377cd\" (UID: \"2c4cd93c-8303-4924-86f3-94593b6377cd\") " Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.903702 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "2c4cd93c-8303-4924-86f3-94593b6377cd" (UID: "2c4cd93c-8303-4924-86f3-94593b6377cd"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.904430 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.904544 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/2c4cd93c-8303-4924-86f3-94593b6377cd-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.914523 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-scripts" (OuterVolumeSpecName: "scripts") pod "2c4cd93c-8303-4924-86f3-94593b6377cd" (UID: "2c4cd93c-8303-4924-86f3-94593b6377cd"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.916591 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c4cd93c-8303-4924-86f3-94593b6377cd-kube-api-access-zvsn4" (OuterVolumeSpecName: "kube-api-access-zvsn4") pod "2c4cd93c-8303-4924-86f3-94593b6377cd" (UID: "2c4cd93c-8303-4924-86f3-94593b6377cd"). InnerVolumeSpecName "kube-api-access-zvsn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.941531 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "2c4cd93c-8303-4924-86f3-94593b6377cd" (UID: "2c4cd93c-8303-4924-86f3-94593b6377cd"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:28 crc kubenswrapper[4910]: I1125 21:49:28.995167 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c4cd93c-8303-4924-86f3-94593b6377cd" (UID: "2c4cd93c-8303-4924-86f3-94593b6377cd"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.006302 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.006407 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.006467 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.006523 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvsn4\" (UniqueName: \"kubernetes.io/projected/2c4cd93c-8303-4924-86f3-94593b6377cd-kube-api-access-zvsn4\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.044233 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-config-data" (OuterVolumeSpecName: "config-data") pod "2c4cd93c-8303-4924-86f3-94593b6377cd" (UID: "2c4cd93c-8303-4924-86f3-94593b6377cd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.109617 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c4cd93c-8303-4924-86f3-94593b6377cd-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.509709 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"2c4cd93c-8303-4924-86f3-94593b6377cd","Type":"ContainerDied","Data":"c7e3271968c4b4c880d5438bb84c6396db894d78da4e67ebf3a4e5d1b73a3b2d"} Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.509828 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.510166 4910 scope.go:117] "RemoveContainer" containerID="2ca95f25c4faafca84618e0d90621fa55fbd1ced49f263d7587e4ef915631752" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.580611 4910 scope.go:117] "RemoveContainer" containerID="70efc7cacc2e18a81f4f291dfe55322cb009e7e88bd02816e9ebe8c680ec3b68" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.590406 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.609621 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.631986 4910 scope.go:117] "RemoveContainer" containerID="40579dbfffb21f5db6d73e14169fda342be996e925484c7d7063627265800774" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.634137 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:49:29 crc kubenswrapper[4910]: E1125 21:49:29.634717 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="proxy-httpd" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.634746 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="proxy-httpd" Nov 25 21:49:29 crc kubenswrapper[4910]: E1125 21:49:29.634788 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="sg-core" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.634798 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="sg-core" Nov 25 21:49:29 crc kubenswrapper[4910]: E1125 21:49:29.634815 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="ceilometer-central-agent" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.634823 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="ceilometer-central-agent" Nov 25 21:49:29 crc kubenswrapper[4910]: E1125 21:49:29.634835 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="ceilometer-notification-agent" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.634845 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="ceilometer-notification-agent" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.635073 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="sg-core" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.635107 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="ceilometer-central-agent" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.635124 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="ceilometer-notification-agent" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.635141 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" containerName="proxy-httpd" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.637904 4910 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.641939 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.644454 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.663919 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.676614 4910 scope.go:117] "RemoveContainer" containerID="82a8bf4d8ed9c98f9561b3bafd6c4a1600d40d02c497bf0315a54cf35861e99a" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.745781 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-run-httpd\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.745856 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-config-data\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.746305 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-log-httpd\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.746438 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr2fw\" (UniqueName: \"kubernetes.io/projected/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-kube-api-access-pr2fw\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.746707 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.746794 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.746954 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-scripts\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.850061 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr2fw\" (UniqueName: 
\"kubernetes.io/projected/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-kube-api-access-pr2fw\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.850198 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.850240 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.850338 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-scripts\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.850535 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-run-httpd\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.850623 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-config-data\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.851702 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-log-httpd\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.852204 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-log-httpd\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.851553 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-run-httpd\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.857465 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.857862 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-config-data\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.858404 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-scripts\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.862385 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.875201 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr2fw\" (UniqueName: \"kubernetes.io/projected/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-kube-api-access-pr2fw\") pod \"ceilometer-0\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " pod="openstack/ceilometer-0" Nov 25 21:49:29 crc kubenswrapper[4910]: I1125 21:49:29.960629 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:49:30 crc kubenswrapper[4910]: I1125 21:49:30.481357 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:49:30 crc kubenswrapper[4910]: W1125 21:49:30.485850 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbd9a223d_c5ad_44be_8e56_08129fd5b3ba.slice/crio-671d8d9774821a8057af57d82e1a1f66b34a358a6a945ebead6fd0332aa81502 WatchSource:0}: Error finding container 671d8d9774821a8057af57d82e1a1f66b34a358a6a945ebead6fd0332aa81502: Status 404 returned error can't find the container with id 671d8d9774821a8057af57d82e1a1f66b34a358a6a945ebead6fd0332aa81502 Nov 25 21:49:30 crc kubenswrapper[4910]: I1125 21:49:30.550619 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd9a223d-c5ad-44be-8e56-08129fd5b3ba","Type":"ContainerStarted","Data":"671d8d9774821a8057af57d82e1a1f66b34a358a6a945ebead6fd0332aa81502"} Nov 25 21:49:31 crc kubenswrapper[4910]: I1125 21:49:31.225566 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c4cd93c-8303-4924-86f3-94593b6377cd" path="/var/lib/kubelet/pods/2c4cd93c-8303-4924-86f3-94593b6377cd/volumes" Nov 25 21:49:31 crc kubenswrapper[4910]: I1125 21:49:31.561479 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd9a223d-c5ad-44be-8e56-08129fd5b3ba","Type":"ContainerStarted","Data":"63f726954dc834029c3701691afaafacd775f92d700ed27908c265c6addf97d6"} Nov 25 21:49:32 crc kubenswrapper[4910]: I1125 21:49:32.583334 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd9a223d-c5ad-44be-8e56-08129fd5b3ba","Type":"ContainerStarted","Data":"9c9e6b721a2a063bee8d5e70902a93971d4989ad8b50482f6efc7989ae27c276"} Nov 25 21:49:33 crc kubenswrapper[4910]: I1125 21:49:33.608721 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd9a223d-c5ad-44be-8e56-08129fd5b3ba","Type":"ContainerStarted","Data":"f2fbfa0a15dda9fc0c46bf6aa78b8ad7cdb8f6f3ed839307df135fb701a63c0f"} Nov 25 21:49:34 crc 
kubenswrapper[4910]: I1125 21:49:34.618203 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd9a223d-c5ad-44be-8e56-08129fd5b3ba","Type":"ContainerStarted","Data":"e81fdca7f0f754e4680f00d08defa429b51b16f99db2da0b49b542973eec3507"} Nov 25 21:49:34 crc kubenswrapper[4910]: I1125 21:49:34.620358 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 21:49:34 crc kubenswrapper[4910]: I1125 21:49:34.646224 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.3609603359999998 podStartE2EDuration="5.646195696s" podCreationTimestamp="2025-11-25 21:49:29 +0000 UTC" firstStartedPulling="2025-11-25 21:49:30.494640496 +0000 UTC m=+1125.957116828" lastFinishedPulling="2025-11-25 21:49:33.779875866 +0000 UTC m=+1129.242352188" observedRunningTime="2025-11-25 21:49:34.642077086 +0000 UTC m=+1130.104553418" watchObservedRunningTime="2025-11-25 21:49:34.646195696 +0000 UTC m=+1130.108672018" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.008785 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.480229 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-9qz78"] Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.481899 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.495736 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-9qz78"] Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.500126 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.500853 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.621955 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-config-data\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.622040 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-scripts\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.622094 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btknp\" (UniqueName: \"kubernetes.io/projected/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-kube-api-access-btknp\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.622192 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.724667 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-config-data\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.726049 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-scripts\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.726081 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btknp\" (UniqueName: \"kubernetes.io/projected/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-kube-api-access-btknp\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.726220 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.730052 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.732346 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.748213 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-scripts\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.748718 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.783934 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btknp\" (UniqueName: \"kubernetes.io/projected/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-kube-api-access-btknp\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.789442 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-config-data\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.804065 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-9qz78\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.815648 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.823332 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.827996 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.828129 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-config-data\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.828286 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07059119-a90a-4392-9fba-163e2e8b9078-logs\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.828363 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cj29g\" (UniqueName: \"kubernetes.io/projected/07059119-a90a-4392-9fba-163e2e8b9078-kube-api-access-cj29g\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.844826 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.847020 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.850911 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.931798 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07059119-a90a-4392-9fba-163e2e8b9078-logs\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.932143 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc7mr\" (UniqueName: \"kubernetes.io/projected/b4501328-05cd-4e57-913d-eb571cab69a4-kube-api-access-cc7mr\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.932341 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4501328-05cd-4e57-913d-eb571cab69a4-logs\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.932483 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07059119-a90a-4392-9fba-163e2e8b9078-logs\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.932494 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cj29g\" (UniqueName: \"kubernetes.io/projected/07059119-a90a-4392-9fba-163e2e8b9078-kube-api-access-cj29g\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.932600 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.932718 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.932917 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-config-data\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.932969 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-config-data\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.939943 4910 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.971141 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.979337 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cj29g\" (UniqueName: \"kubernetes.io/projected/07059119-a90a-4392-9fba-163e2e8b9078-kube-api-access-cj29g\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.979673 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.981301 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 21:49:36 crc kubenswrapper[4910]: I1125 21:49:36.983212 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-config-data\") pod \"nova-api-0\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " pod="openstack/nova-api-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.001330 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.004109 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.040080 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.040140 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-config-data\") pod \"nova-scheduler-0\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.040223 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-config-data\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.040271 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8x6hx\" (UniqueName: \"kubernetes.io/projected/21129bcd-1cf1-4613-84c7-4078cc11738d-kube-api-access-8x6hx\") pod \"nova-scheduler-0\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.040322 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-combined-ca-bundle\") pod 
\"nova-scheduler-0\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.040346 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc7mr\" (UniqueName: \"kubernetes.io/projected/b4501328-05cd-4e57-913d-eb571cab69a4-kube-api-access-cc7mr\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.040390 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4501328-05cd-4e57-913d-eb571cab69a4-logs\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.040877 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4501328-05cd-4e57-913d-eb571cab69a4-logs\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.074530 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.080188 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-config-data\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.083446 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc7mr\" (UniqueName: \"kubernetes.io/projected/b4501328-05cd-4e57-913d-eb571cab69a4-kube-api-access-cc7mr\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.083535 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-qcxp8"] Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.087411 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.101135 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " pod="openstack/nova-metadata-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.112408 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.142023 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8x6hx\" (UniqueName: \"kubernetes.io/projected/21129bcd-1cf1-4613-84c7-4078cc11738d-kube-api-access-8x6hx\") pod \"nova-scheduler-0\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.142085 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-config\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.142127 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.142154 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2pdls\" (UniqueName: \"kubernetes.io/projected/6c213fd9-da6e-4244-b118-c038c8b70341-kube-api-access-2pdls\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.142182 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.142269 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-config-data\") pod \"nova-scheduler-0\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.142292 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.142334 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-svc\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.142411 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: 
\"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.149678 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-config-data\") pod \"nova-scheduler-0\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.153571 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.162326 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-qcxp8"] Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.180802 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8x6hx\" (UniqueName: \"kubernetes.io/projected/21129bcd-1cf1-4613-84c7-4078cc11738d-kube-api-access-8x6hx\") pod \"nova-scheduler-0\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.190317 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.191995 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.201114 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.209040 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.248535 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.248621 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-svc\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.248671 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.248728 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " 
pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.248751 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-config\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.248775 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.248805 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sj6rn\" (UniqueName: \"kubernetes.io/projected/d7f40bd9-6226-4bd6-ac74-12d030c150a8-kube-api-access-sj6rn\") pod \"nova-cell1-novncproxy-0\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.248862 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2pdls\" (UniqueName: \"kubernetes.io/projected/6c213fd9-da6e-4244-b118-c038c8b70341-kube-api-access-2pdls\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.248889 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.256575 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-config\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.257482 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-sb\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.257728 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-nb\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.257938 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-svc\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: 
I1125 21:49:37.261782 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-swift-storage-0\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.284256 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2pdls\" (UniqueName: \"kubernetes.io/projected/6c213fd9-da6e-4244-b118-c038c8b70341-kube-api-access-2pdls\") pod \"dnsmasq-dns-865f5d856f-qcxp8\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.354727 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.354792 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.354828 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sj6rn\" (UniqueName: \"kubernetes.io/projected/d7f40bd9-6226-4bd6-ac74-12d030c150a8-kube-api-access-sj6rn\") pod \"nova-cell1-novncproxy-0\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.381969 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.383052 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.393115 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sj6rn\" (UniqueName: \"kubernetes.io/projected/d7f40bd9-6226-4bd6-ac74-12d030c150a8-kube-api-access-sj6rn\") pod \"nova-cell1-novncproxy-0\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.438891 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.466598 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.545160 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.737726 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-9qz78"] Nov 25 21:49:37 crc kubenswrapper[4910]: W1125 21:49:37.773410 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod74e4f4a8_4449_4432_bf1a_b82789fe0d3d.slice/crio-95579736fbd8d24c58f01449c219c63e9c4bdffbabdaec1d1525c4fc475785c3 WatchSource:0}: Error finding container 95579736fbd8d24c58f01449c219c63e9c4bdffbabdaec1d1525c4fc475785c3: Status 404 returned error can't find the container with id 95579736fbd8d24c58f01449c219c63e9c4bdffbabdaec1d1525c4fc475785c3 Nov 25 21:49:37 crc kubenswrapper[4910]: I1125 21:49:37.936528 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.090196 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-sng8f"] Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.091589 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.099613 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.100208 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.120975 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-sng8f"] Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.161418 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.193794 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5c857\" (UniqueName: \"kubernetes.io/projected/a821ca31-47ec-41c0-97c8-3d254b4f412a-kube-api-access-5c857\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.196665 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.197063 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-scripts\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.197448 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-config-data\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: 
\"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.304137 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-scripts\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.304280 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-config-data\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.304421 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5c857\" (UniqueName: \"kubernetes.io/projected/a821ca31-47ec-41c0-97c8-3d254b4f412a-kube-api-access-5c857\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.304444 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.311742 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-scripts\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.321063 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.321878 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-config-data\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.325727 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5c857\" (UniqueName: \"kubernetes.io/projected/a821ca31-47ec-41c0-97c8-3d254b4f412a-kube-api-access-5c857\") pod \"nova-cell1-conductor-db-sync-sng8f\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.419330 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.434895 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.448915 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-qcxp8"] Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.628765 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:49:38 crc kubenswrapper[4910]: W1125 21:49:38.672226 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod21129bcd_1cf1_4613_84c7_4078cc11738d.slice/crio-ac9f7d55104516910dbdd8524c842fa613f16ee424e044d1ffc95232967a4491 WatchSource:0}: Error finding container ac9f7d55104516910dbdd8524c842fa613f16ee424e044d1ffc95232967a4491: Status 404 returned error can't find the container with id ac9f7d55104516910dbdd8524c842fa613f16ee424e044d1ffc95232967a4491 Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.745487 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07059119-a90a-4392-9fba-163e2e8b9078","Type":"ContainerStarted","Data":"1e5ce33f5f0d34279c4be3cc438c15153c6d9b861779bc62e427ff92a6059d60"} Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.753166 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9qz78" event={"ID":"74e4f4a8-4449-4432-bf1a-b82789fe0d3d","Type":"ContainerStarted","Data":"7de067dbaf2d29d420601c9d7b4a54117b9eac09d18bf24eb53d9481ff83dec6"} Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.753225 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9qz78" event={"ID":"74e4f4a8-4449-4432-bf1a-b82789fe0d3d","Type":"ContainerStarted","Data":"95579736fbd8d24c58f01449c219c63e9c4bdffbabdaec1d1525c4fc475785c3"} Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.760055 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b4501328-05cd-4e57-913d-eb571cab69a4","Type":"ContainerStarted","Data":"4f9b7229eafa112823a5971003716303d6bcc80e762bebbed98c190c41672132"} Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.764163 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d7f40bd9-6226-4bd6-ac74-12d030c150a8","Type":"ContainerStarted","Data":"a9211efccab60ec4fdbc4aac1eb87732ff534f1d64edd20367fe8d23ea473dd8"} Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.765902 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" event={"ID":"6c213fd9-da6e-4244-b118-c038c8b70341","Type":"ContainerStarted","Data":"bf1aad104e6035a2f003758f5f0269b588ab07f93f8710a505cbd0fc06031eb0"} Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.767444 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"21129bcd-1cf1-4613-84c7-4078cc11738d","Type":"ContainerStarted","Data":"ac9f7d55104516910dbdd8524c842fa613f16ee424e044d1ffc95232967a4491"} Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.783866 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-9qz78" podStartSLOduration=2.783838766 podStartE2EDuration="2.783838766s" podCreationTimestamp="2025-11-25 21:49:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:49:38.775061753 +0000 
UTC m=+1134.237538085" watchObservedRunningTime="2025-11-25 21:49:38.783838766 +0000 UTC m=+1134.246315088" Nov 25 21:49:38 crc kubenswrapper[4910]: I1125 21:49:38.999134 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-sng8f"] Nov 25 21:49:39 crc kubenswrapper[4910]: I1125 21:49:39.790436 4910 generic.go:334] "Generic (PLEG): container finished" podID="6c213fd9-da6e-4244-b118-c038c8b70341" containerID="6c093136edfb85328a19d6cec35d7d75459ab93afed95bc361f1226185dd3af9" exitCode=0 Nov 25 21:49:39 crc kubenswrapper[4910]: I1125 21:49:39.790799 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" event={"ID":"6c213fd9-da6e-4244-b118-c038c8b70341","Type":"ContainerDied","Data":"6c093136edfb85328a19d6cec35d7d75459ab93afed95bc361f1226185dd3af9"} Nov 25 21:49:39 crc kubenswrapper[4910]: I1125 21:49:39.796573 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-sng8f" event={"ID":"a821ca31-47ec-41c0-97c8-3d254b4f412a","Type":"ContainerStarted","Data":"a83dd0fb339072beb5e0c6a124aaa77d14b7695af25a9a8848cdf6ab2a7846ba"} Nov 25 21:49:39 crc kubenswrapper[4910]: I1125 21:49:39.796618 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-sng8f" event={"ID":"a821ca31-47ec-41c0-97c8-3d254b4f412a","Type":"ContainerStarted","Data":"1fe69ea9027bbdbfa4f1674e5f86fa6a7debad74f7cd41e1c4c3ab5d631eb626"} Nov 25 21:49:39 crc kubenswrapper[4910]: I1125 21:49:39.857816 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-sng8f" podStartSLOduration=1.857792184 podStartE2EDuration="1.857792184s" podCreationTimestamp="2025-11-25 21:49:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:49:39.848732154 +0000 UTC m=+1135.311208476" watchObservedRunningTime="2025-11-25 21:49:39.857792184 +0000 UTC m=+1135.320268506" Nov 25 21:49:41 crc kubenswrapper[4910]: I1125 21:49:41.177307 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:41 crc kubenswrapper[4910]: I1125 21:49:41.221532 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 21:49:42 crc kubenswrapper[4910]: I1125 21:49:42.847657 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" event={"ID":"6c213fd9-da6e-4244-b118-c038c8b70341","Type":"ContainerStarted","Data":"f21ed05aacbe49a3c5fc46f00d4e5c8aa1187e383b9a3799560a46104bc56a48"} Nov 25 21:49:42 crc kubenswrapper[4910]: I1125 21:49:42.849090 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:42 crc kubenswrapper[4910]: I1125 21:49:42.856097 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"21129bcd-1cf1-4613-84c7-4078cc11738d","Type":"ContainerStarted","Data":"d2f620d772660f1a41f891f22352d87871a684fc4c6c890f81c8dde9db321244"} Nov 25 21:49:42 crc kubenswrapper[4910]: I1125 21:49:42.860013 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07059119-a90a-4392-9fba-163e2e8b9078","Type":"ContainerStarted","Data":"4741dc362ac9100fb6a242ed23b67dbcb6fe94d33813f771e153557584e08955"} Nov 25 21:49:42 crc kubenswrapper[4910]: I1125 21:49:42.866137 4910 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b4501328-05cd-4e57-913d-eb571cab69a4","Type":"ContainerStarted","Data":"9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc"} Nov 25 21:49:42 crc kubenswrapper[4910]: I1125 21:49:42.875113 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d7f40bd9-6226-4bd6-ac74-12d030c150a8","Type":"ContainerStarted","Data":"a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4"} Nov 25 21:49:42 crc kubenswrapper[4910]: I1125 21:49:42.875221 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="d7f40bd9-6226-4bd6-ac74-12d030c150a8" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4" gracePeriod=30 Nov 25 21:49:42 crc kubenswrapper[4910]: I1125 21:49:42.894290 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" podStartSLOduration=6.894224513 podStartE2EDuration="6.894224513s" podCreationTimestamp="2025-11-25 21:49:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:49:42.872664491 +0000 UTC m=+1138.335140813" watchObservedRunningTime="2025-11-25 21:49:42.894224513 +0000 UTC m=+1138.356700855" Nov 25 21:49:42 crc kubenswrapper[4910]: I1125 21:49:42.913528 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.307611135 podStartE2EDuration="6.913496804s" podCreationTimestamp="2025-11-25 21:49:36 +0000 UTC" firstStartedPulling="2025-11-25 21:49:38.689821611 +0000 UTC m=+1134.152297933" lastFinishedPulling="2025-11-25 21:49:42.29570728 +0000 UTC m=+1137.758183602" observedRunningTime="2025-11-25 21:49:42.894050458 +0000 UTC m=+1138.356526790" watchObservedRunningTime="2025-11-25 21:49:42.913496804 +0000 UTC m=+1138.375973136" Nov 25 21:49:42 crc kubenswrapper[4910]: I1125 21:49:42.921299 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.128621006 podStartE2EDuration="5.921273511s" podCreationTimestamp="2025-11-25 21:49:37 +0000 UTC" firstStartedPulling="2025-11-25 21:49:38.495842283 +0000 UTC m=+1133.958318605" lastFinishedPulling="2025-11-25 21:49:42.288494788 +0000 UTC m=+1137.750971110" observedRunningTime="2025-11-25 21:49:42.915291112 +0000 UTC m=+1138.377767454" watchObservedRunningTime="2025-11-25 21:49:42.921273511 +0000 UTC m=+1138.383749833" Nov 25 21:49:43 crc kubenswrapper[4910]: I1125 21:49:43.905451 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07059119-a90a-4392-9fba-163e2e8b9078","Type":"ContainerStarted","Data":"3cb3d85836c31eb46d009a9887448ea7da96fa8585d4023ef94906162869ba88"} Nov 25 21:49:43 crc kubenswrapper[4910]: I1125 21:49:43.913014 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b4501328-05cd-4e57-913d-eb571cab69a4","Type":"ContainerStarted","Data":"c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661"} Nov 25 21:49:43 crc kubenswrapper[4910]: I1125 21:49:43.913207 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b4501328-05cd-4e57-913d-eb571cab69a4" containerName="nova-metadata-log" 
containerID="cri-o://9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc" gracePeriod=30 Nov 25 21:49:43 crc kubenswrapper[4910]: I1125 21:49:43.913417 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b4501328-05cd-4e57-913d-eb571cab69a4" containerName="nova-metadata-metadata" containerID="cri-o://c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661" gracePeriod=30 Nov 25 21:49:43 crc kubenswrapper[4910]: I1125 21:49:43.967622 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.854128817 podStartE2EDuration="7.967600046s" podCreationTimestamp="2025-11-25 21:49:36 +0000 UTC" firstStartedPulling="2025-11-25 21:49:38.182122508 +0000 UTC m=+1133.644598830" lastFinishedPulling="2025-11-25 21:49:42.295593727 +0000 UTC m=+1137.758070059" observedRunningTime="2025-11-25 21:49:43.941817572 +0000 UTC m=+1139.404293894" watchObservedRunningTime="2025-11-25 21:49:43.967600046 +0000 UTC m=+1139.430076368" Nov 25 21:49:43 crc kubenswrapper[4910]: I1125 21:49:43.977412 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.653224046 podStartE2EDuration="7.977389216s" podCreationTimestamp="2025-11-25 21:49:36 +0000 UTC" firstStartedPulling="2025-11-25 21:49:37.971526949 +0000 UTC m=+1133.434003261" lastFinishedPulling="2025-11-25 21:49:42.295692109 +0000 UTC m=+1137.758168431" observedRunningTime="2025-11-25 21:49:43.961050822 +0000 UTC m=+1139.423527154" watchObservedRunningTime="2025-11-25 21:49:43.977389216 +0000 UTC m=+1139.439865538" Nov 25 21:49:44 crc kubenswrapper[4910]: E1125 21:49:44.132544 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4501328_05cd_4e57_913d_eb571cab69a4.slice/crio-9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4501328_05cd_4e57_913d_eb571cab69a4.slice/crio-conmon-9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb4501328_05cd_4e57_913d_eb571cab69a4.slice/crio-c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661.scope\": RecentStats: unable to find data in memory cache]" Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.726865 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.858359 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-config-data\") pod \"b4501328-05cd-4e57-913d-eb571cab69a4\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.858604 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-combined-ca-bundle\") pod \"b4501328-05cd-4e57-913d-eb571cab69a4\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.858666 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cc7mr\" (UniqueName: \"kubernetes.io/projected/b4501328-05cd-4e57-913d-eb571cab69a4-kube-api-access-cc7mr\") pod \"b4501328-05cd-4e57-913d-eb571cab69a4\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.858770 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4501328-05cd-4e57-913d-eb571cab69a4-logs\") pod \"b4501328-05cd-4e57-913d-eb571cab69a4\" (UID: \"b4501328-05cd-4e57-913d-eb571cab69a4\") " Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.859960 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b4501328-05cd-4e57-913d-eb571cab69a4-logs" (OuterVolumeSpecName: "logs") pod "b4501328-05cd-4e57-913d-eb571cab69a4" (UID: "b4501328-05cd-4e57-913d-eb571cab69a4"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.866527 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4501328-05cd-4e57-913d-eb571cab69a4-kube-api-access-cc7mr" (OuterVolumeSpecName: "kube-api-access-cc7mr") pod "b4501328-05cd-4e57-913d-eb571cab69a4" (UID: "b4501328-05cd-4e57-913d-eb571cab69a4"). InnerVolumeSpecName "kube-api-access-cc7mr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.896566 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b4501328-05cd-4e57-913d-eb571cab69a4" (UID: "b4501328-05cd-4e57-913d-eb571cab69a4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.899667 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-config-data" (OuterVolumeSpecName: "config-data") pod "b4501328-05cd-4e57-913d-eb571cab69a4" (UID: "b4501328-05cd-4e57-913d-eb571cab69a4"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.928862 4910 generic.go:334] "Generic (PLEG): container finished" podID="b4501328-05cd-4e57-913d-eb571cab69a4" containerID="c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661" exitCode=0 Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.928901 4910 generic.go:334] "Generic (PLEG): container finished" podID="b4501328-05cd-4e57-913d-eb571cab69a4" containerID="9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc" exitCode=143 Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.929064 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b4501328-05cd-4e57-913d-eb571cab69a4","Type":"ContainerDied","Data":"c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661"} Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.929185 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b4501328-05cd-4e57-913d-eb571cab69a4","Type":"ContainerDied","Data":"9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc"} Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.929207 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b4501328-05cd-4e57-913d-eb571cab69a4","Type":"ContainerDied","Data":"4f9b7229eafa112823a5971003716303d6bcc80e762bebbed98c190c41672132"} Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.929232 4910 scope.go:117] "RemoveContainer" containerID="c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661" Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.930107 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.964930 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cc7mr\" (UniqueName: \"kubernetes.io/projected/b4501328-05cd-4e57-913d-eb571cab69a4-kube-api-access-cc7mr\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.964998 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b4501328-05cd-4e57-913d-eb571cab69a4-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.965015 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:44 crc kubenswrapper[4910]: I1125 21:49:44.965029 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b4501328-05cd-4e57-913d-eb571cab69a4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.015209 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.027831 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.042379 4910 scope.go:117] "RemoveContainer" containerID="9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.053721 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:45 crc kubenswrapper[4910]: 
E1125 21:49:45.054817 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4501328-05cd-4e57-913d-eb571cab69a4" containerName="nova-metadata-log" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.054879 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4501328-05cd-4e57-913d-eb571cab69a4" containerName="nova-metadata-log" Nov 25 21:49:45 crc kubenswrapper[4910]: E1125 21:49:45.054920 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4501328-05cd-4e57-913d-eb571cab69a4" containerName="nova-metadata-metadata" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.055033 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4501328-05cd-4e57-913d-eb571cab69a4" containerName="nova-metadata-metadata" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.055709 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4501328-05cd-4e57-913d-eb571cab69a4" containerName="nova-metadata-log" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.055742 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4501328-05cd-4e57-913d-eb571cab69a4" containerName="nova-metadata-metadata" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.057734 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.063752 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.064079 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.068470 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.068582 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-config-data\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.068711 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kp656\" (UniqueName: \"kubernetes.io/projected/141c82a2-7ed1-4e1d-aaef-02e471a96029-kube-api-access-kp656\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.068879 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/141c82a2-7ed1-4e1d-aaef-02e471a96029-logs\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.068922 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: 
\"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.088937 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.107463 4910 scope.go:117] "RemoveContainer" containerID="c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661" Nov 25 21:49:45 crc kubenswrapper[4910]: E1125 21:49:45.108170 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661\": container with ID starting with c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661 not found: ID does not exist" containerID="c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.108229 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661"} err="failed to get container status \"c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661\": rpc error: code = NotFound desc = could not find container \"c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661\": container with ID starting with c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661 not found: ID does not exist" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.108351 4910 scope.go:117] "RemoveContainer" containerID="9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc" Nov 25 21:49:45 crc kubenswrapper[4910]: E1125 21:49:45.108746 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc\": container with ID starting with 9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc not found: ID does not exist" containerID="9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.108783 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc"} err="failed to get container status \"9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc\": rpc error: code = NotFound desc = could not find container \"9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc\": container with ID starting with 9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc not found: ID does not exist" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.108811 4910 scope.go:117] "RemoveContainer" containerID="c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.109621 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661"} err="failed to get container status \"c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661\": rpc error: code = NotFound desc = could not find container \"c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661\": container with ID starting with c97eefb68424809a51b1a43dd3e30cc6260e09f45a1215d9e1bca67807e2e661 not found: ID does not exist" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.109685 4910 scope.go:117] 
"RemoveContainer" containerID="9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.109987 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc"} err="failed to get container status \"9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc\": rpc error: code = NotFound desc = could not find container \"9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc\": container with ID starting with 9f9e4da56b4f1f9e41bad68da137e3dc16d7f2bf80b44fdec73d291e7c096dcc not found: ID does not exist" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.171176 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.171335 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.171372 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-config-data\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.171442 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kp656\" (UniqueName: \"kubernetes.io/projected/141c82a2-7ed1-4e1d-aaef-02e471a96029-kube-api-access-kp656\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.171529 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/141c82a2-7ed1-4e1d-aaef-02e471a96029-logs\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.172521 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/141c82a2-7ed1-4e1d-aaef-02e471a96029-logs\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.174775 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.175475 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.177512 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 
25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.188064 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-config-data\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.189218 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.189309 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kp656\" (UniqueName: \"kubernetes.io/projected/141c82a2-7ed1-4e1d-aaef-02e471a96029-kube-api-access-kp656\") pod \"nova-metadata-0\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " pod="openstack/nova-metadata-0" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.230692 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b4501328-05cd-4e57-913d-eb571cab69a4" path="/var/lib/kubelet/pods/b4501328-05cd-4e57-913d-eb571cab69a4/volumes" Nov 25 21:49:45 crc kubenswrapper[4910]: I1125 21:49:45.403438 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:49:46 crc kubenswrapper[4910]: I1125 21:49:46.025540 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:46 crc kubenswrapper[4910]: W1125 21:49:46.031349 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod141c82a2_7ed1_4e1d_aaef_02e471a96029.slice/crio-fdcdf50b45d18ef288ae1b019b0dfd2a9f7b523fd209021caaee9c888bba8717 WatchSource:0}: Error finding container fdcdf50b45d18ef288ae1b019b0dfd2a9f7b523fd209021caaee9c888bba8717: Status 404 returned error can't find the container with id fdcdf50b45d18ef288ae1b019b0dfd2a9f7b523fd209021caaee9c888bba8717 Nov 25 21:49:46 crc kubenswrapper[4910]: I1125 21:49:46.979205 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"141c82a2-7ed1-4e1d-aaef-02e471a96029","Type":"ContainerStarted","Data":"964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb"} Nov 25 21:49:46 crc kubenswrapper[4910]: I1125 21:49:46.979737 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"141c82a2-7ed1-4e1d-aaef-02e471a96029","Type":"ContainerStarted","Data":"cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c"} Nov 25 21:49:46 crc kubenswrapper[4910]: I1125 21:49:46.979751 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"141c82a2-7ed1-4e1d-aaef-02e471a96029","Type":"ContainerStarted","Data":"fdcdf50b45d18ef288ae1b019b0dfd2a9f7b523fd209021caaee9c888bba8717"} Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.003010 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.002983105 podStartE2EDuration="2.002983105s" podCreationTimestamp="2025-11-25 21:49:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:49:47.001307701 +0000 UTC 
m=+1142.463784033" watchObservedRunningTime="2025-11-25 21:49:47.002983105 +0000 UTC m=+1142.465459427" Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.076252 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.076319 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.440768 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.440827 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.469448 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.475118 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.545727 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.569340 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-vdpzb"] Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.569660 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" podUID="02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" containerName="dnsmasq-dns" containerID="cri-o://5e034bfb2e7ec8b0f89a5e253c7baab4a4c9b84474ae1d16c4683401e9054436" gracePeriod=10 Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.997324 4910 generic.go:334] "Generic (PLEG): container finished" podID="74e4f4a8-4449-4432-bf1a-b82789fe0d3d" containerID="7de067dbaf2d29d420601c9d7b4a54117b9eac09d18bf24eb53d9481ff83dec6" exitCode=0 Nov 25 21:49:47 crc kubenswrapper[4910]: I1125 21:49:47.997820 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9qz78" event={"ID":"74e4f4a8-4449-4432-bf1a-b82789fe0d3d","Type":"ContainerDied","Data":"7de067dbaf2d29d420601c9d7b4a54117b9eac09d18bf24eb53d9481ff83dec6"} Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.005918 4910 generic.go:334] "Generic (PLEG): container finished" podID="02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" containerID="5e034bfb2e7ec8b0f89a5e253c7baab4a4c9b84474ae1d16c4683401e9054436" exitCode=0 Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.006051 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" event={"ID":"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7","Type":"ContainerDied","Data":"5e034bfb2e7ec8b0f89a5e253c7baab4a4c9b84474ae1d16c4683401e9054436"} Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.113050 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.198393 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="07059119-a90a-4392-9fba-163e2e8b9078" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.198650 
4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="07059119-a90a-4392-9fba-163e2e8b9078" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.186:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.221379 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.285076 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-svc\") pod \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.285284 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-swift-storage-0\") pod \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.285335 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-config\") pod \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.285359 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-nb\") pod \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.285521 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kzx9r\" (UniqueName: \"kubernetes.io/projected/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-kube-api-access-kzx9r\") pod \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.285549 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-sb\") pod \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\" (UID: \"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7\") " Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.325615 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-kube-api-access-kzx9r" (OuterVolumeSpecName: "kube-api-access-kzx9r") pod "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" (UID: "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7"). InnerVolumeSpecName "kube-api-access-kzx9r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.390755 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kzx9r\" (UniqueName: \"kubernetes.io/projected/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-kube-api-access-kzx9r\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.405117 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" (UID: "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.423122 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-config" (OuterVolumeSpecName: "config") pod "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" (UID: "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.430486 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" (UID: "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.450914 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" (UID: "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.469611 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" (UID: "02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.492622 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.492966 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.493028 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.493092 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:48 crc kubenswrapper[4910]: I1125 21:49:48.493146 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.029098 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.029398 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bb4fc677f-vdpzb" event={"ID":"02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7","Type":"ContainerDied","Data":"d2259b64c577c53300ea38c48132124f76b6c38854c65f7397ab80c0da193d1f"} Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.030678 4910 scope.go:117] "RemoveContainer" containerID="5e034bfb2e7ec8b0f89a5e253c7baab4a4c9b84474ae1d16c4683401e9054436" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.031671 4910 generic.go:334] "Generic (PLEG): container finished" podID="a821ca31-47ec-41c0-97c8-3d254b4f412a" containerID="a83dd0fb339072beb5e0c6a124aaa77d14b7695af25a9a8848cdf6ab2a7846ba" exitCode=0 Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.031791 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-sng8f" event={"ID":"a821ca31-47ec-41c0-97c8-3d254b4f412a","Type":"ContainerDied","Data":"a83dd0fb339072beb5e0c6a124aaa77d14b7695af25a9a8848cdf6ab2a7846ba"} Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.091633 4910 scope.go:117] "RemoveContainer" containerID="3605e1b1778bd90b1d46db0d58970fee92cdd412c902c599ace17bf8779b9e1e" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.149491 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-vdpzb"] Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.170557 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6bb4fc677f-vdpzb"] Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.226521 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" path="/var/lib/kubelet/pods/02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7/volumes" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.564546 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.626254 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-config-data\") pod \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.626351 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-scripts\") pod \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.626394 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btknp\" (UniqueName: \"kubernetes.io/projected/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-kube-api-access-btknp\") pod \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.626508 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-combined-ca-bundle\") pod \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\" (UID: \"74e4f4a8-4449-4432-bf1a-b82789fe0d3d\") " Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.645141 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-kube-api-access-btknp" (OuterVolumeSpecName: "kube-api-access-btknp") pod "74e4f4a8-4449-4432-bf1a-b82789fe0d3d" (UID: "74e4f4a8-4449-4432-bf1a-b82789fe0d3d"). InnerVolumeSpecName "kube-api-access-btknp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.645869 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-scripts" (OuterVolumeSpecName: "scripts") pod "74e4f4a8-4449-4432-bf1a-b82789fe0d3d" (UID: "74e4f4a8-4449-4432-bf1a-b82789fe0d3d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.672791 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "74e4f4a8-4449-4432-bf1a-b82789fe0d3d" (UID: "74e4f4a8-4449-4432-bf1a-b82789fe0d3d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.679618 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-config-data" (OuterVolumeSpecName: "config-data") pod "74e4f4a8-4449-4432-bf1a-b82789fe0d3d" (UID: "74e4f4a8-4449-4432-bf1a-b82789fe0d3d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.729316 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.729361 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.729375 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btknp\" (UniqueName: \"kubernetes.io/projected/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-kube-api-access-btknp\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:49 crc kubenswrapper[4910]: I1125 21:49:49.729385 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/74e4f4a8-4449-4432-bf1a-b82789fe0d3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.043906 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-9qz78" event={"ID":"74e4f4a8-4449-4432-bf1a-b82789fe0d3d","Type":"ContainerDied","Data":"95579736fbd8d24c58f01449c219c63e9c4bdffbabdaec1d1525c4fc475785c3"} Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.043941 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-9qz78" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.043955 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95579736fbd8d24c58f01449c219c63e9c4bdffbabdaec1d1525c4fc475785c3" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.266475 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.266937 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="07059119-a90a-4392-9fba-163e2e8b9078" containerName="nova-api-log" containerID="cri-o://4741dc362ac9100fb6a242ed23b67dbcb6fe94d33813f771e153557584e08955" gracePeriod=30 Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.267178 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="07059119-a90a-4392-9fba-163e2e8b9078" containerName="nova-api-api" containerID="cri-o://3cb3d85836c31eb46d009a9887448ea7da96fa8585d4023ef94906162869ba88" gracePeriod=30 Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.285396 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.312330 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.312638 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="141c82a2-7ed1-4e1d-aaef-02e471a96029" containerName="nova-metadata-log" containerID="cri-o://cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c" gracePeriod=30 Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.312848 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="141c82a2-7ed1-4e1d-aaef-02e471a96029" 
containerName="nova-metadata-metadata" containerID="cri-o://964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb" gracePeriod=30 Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.405795 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.406412 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.510383 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.546095 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5c857\" (UniqueName: \"kubernetes.io/projected/a821ca31-47ec-41c0-97c8-3d254b4f412a-kube-api-access-5c857\") pod \"a821ca31-47ec-41c0-97c8-3d254b4f412a\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.546727 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-config-data\") pod \"a821ca31-47ec-41c0-97c8-3d254b4f412a\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.546872 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-combined-ca-bundle\") pod \"a821ca31-47ec-41c0-97c8-3d254b4f412a\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.547043 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-scripts\") pod \"a821ca31-47ec-41c0-97c8-3d254b4f412a\" (UID: \"a821ca31-47ec-41c0-97c8-3d254b4f412a\") " Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.553800 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a821ca31-47ec-41c0-97c8-3d254b4f412a-kube-api-access-5c857" (OuterVolumeSpecName: "kube-api-access-5c857") pod "a821ca31-47ec-41c0-97c8-3d254b4f412a" (UID: "a821ca31-47ec-41c0-97c8-3d254b4f412a"). InnerVolumeSpecName "kube-api-access-5c857". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.554189 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-scripts" (OuterVolumeSpecName: "scripts") pod "a821ca31-47ec-41c0-97c8-3d254b4f412a" (UID: "a821ca31-47ec-41c0-97c8-3d254b4f412a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.610654 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a821ca31-47ec-41c0-97c8-3d254b4f412a" (UID: "a821ca31-47ec-41c0-97c8-3d254b4f412a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.632431 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-config-data" (OuterVolumeSpecName: "config-data") pod "a821ca31-47ec-41c0-97c8-3d254b4f412a" (UID: "a821ca31-47ec-41c0-97c8-3d254b4f412a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.649656 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.649699 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.649714 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a821ca31-47ec-41c0-97c8-3d254b4f412a-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:50 crc kubenswrapper[4910]: I1125 21:49:50.649724 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5c857\" (UniqueName: \"kubernetes.io/projected/a821ca31-47ec-41c0-97c8-3d254b4f412a-kube-api-access-5c857\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.059605 4910 generic.go:334] "Generic (PLEG): container finished" podID="141c82a2-7ed1-4e1d-aaef-02e471a96029" containerID="cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c" exitCode=143 Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.059669 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"141c82a2-7ed1-4e1d-aaef-02e471a96029","Type":"ContainerDied","Data":"cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c"} Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.062406 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-sng8f" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.062430 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-sng8f" event={"ID":"a821ca31-47ec-41c0-97c8-3d254b4f412a","Type":"ContainerDied","Data":"1fe69ea9027bbdbfa4f1674e5f86fa6a7debad74f7cd41e1c4c3ab5d631eb626"} Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.062665 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1fe69ea9027bbdbfa4f1674e5f86fa6a7debad74f7cd41e1c4c3ab5d631eb626" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.068208 4910 generic.go:334] "Generic (PLEG): container finished" podID="07059119-a90a-4392-9fba-163e2e8b9078" containerID="4741dc362ac9100fb6a242ed23b67dbcb6fe94d33813f771e153557584e08955" exitCode=143 Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.068480 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="21129bcd-1cf1-4613-84c7-4078cc11738d" containerName="nova-scheduler-scheduler" containerID="cri-o://d2f620d772660f1a41f891f22352d87871a684fc4c6c890f81c8dde9db321244" gracePeriod=30 Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.068875 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07059119-a90a-4392-9fba-163e2e8b9078","Type":"ContainerDied","Data":"4741dc362ac9100fb6a242ed23b67dbcb6fe94d33813f771e153557584e08955"} Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.424491 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 21:49:51 crc kubenswrapper[4910]: E1125 21:49:51.425264 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="74e4f4a8-4449-4432-bf1a-b82789fe0d3d" containerName="nova-manage" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.425282 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="74e4f4a8-4449-4432-bf1a-b82789fe0d3d" containerName="nova-manage" Nov 25 21:49:51 crc kubenswrapper[4910]: E1125 21:49:51.425297 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" containerName="dnsmasq-dns" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.425306 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" containerName="dnsmasq-dns" Nov 25 21:49:51 crc kubenswrapper[4910]: E1125 21:49:51.425341 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a821ca31-47ec-41c0-97c8-3d254b4f412a" containerName="nova-cell1-conductor-db-sync" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.425351 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a821ca31-47ec-41c0-97c8-3d254b4f412a" containerName="nova-cell1-conductor-db-sync" Nov 25 21:49:51 crc kubenswrapper[4910]: E1125 21:49:51.425365 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" containerName="init" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.425373 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" containerName="init" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.425629 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="02bb38e5-05ca-49a5-a6f1-9af44e9b8fe7" containerName="dnsmasq-dns" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.425658 4910 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="74e4f4a8-4449-4432-bf1a-b82789fe0d3d" containerName="nova-manage" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.425677 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a821ca31-47ec-41c0-97c8-3d254b4f412a" containerName="nova-cell1-conductor-db-sync" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.429974 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.437616 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.445718 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.591002 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-phhl6\" (UniqueName: \"kubernetes.io/projected/cc5eb389-8176-4989-8f45-a7a9631b286b-kube-api-access-phhl6\") pod \"nova-cell1-conductor-0\" (UID: \"cc5eb389-8176-4989-8f45-a7a9631b286b\") " pod="openstack/nova-cell1-conductor-0" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.591128 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc5eb389-8176-4989-8f45-a7a9631b286b-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"cc5eb389-8176-4989-8f45-a7a9631b286b\") " pod="openstack/nova-cell1-conductor-0" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.591182 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc5eb389-8176-4989-8f45-a7a9631b286b-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"cc5eb389-8176-4989-8f45-a7a9631b286b\") " pod="openstack/nova-cell1-conductor-0" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.693808 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc5eb389-8176-4989-8f45-a7a9631b286b-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"cc5eb389-8176-4989-8f45-a7a9631b286b\") " pod="openstack/nova-cell1-conductor-0" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.693919 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc5eb389-8176-4989-8f45-a7a9631b286b-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"cc5eb389-8176-4989-8f45-a7a9631b286b\") " pod="openstack/nova-cell1-conductor-0" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.694012 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-phhl6\" (UniqueName: \"kubernetes.io/projected/cc5eb389-8176-4989-8f45-a7a9631b286b-kube-api-access-phhl6\") pod \"nova-cell1-conductor-0\" (UID: \"cc5eb389-8176-4989-8f45-a7a9631b286b\") " pod="openstack/nova-cell1-conductor-0" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.703711 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cc5eb389-8176-4989-8f45-a7a9631b286b-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"cc5eb389-8176-4989-8f45-a7a9631b286b\") " pod="openstack/nova-cell1-conductor-0" Nov 
25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.714523 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cc5eb389-8176-4989-8f45-a7a9631b286b-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"cc5eb389-8176-4989-8f45-a7a9631b286b\") " pod="openstack/nova-cell1-conductor-0" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.717921 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-phhl6\" (UniqueName: \"kubernetes.io/projected/cc5eb389-8176-4989-8f45-a7a9631b286b-kube-api-access-phhl6\") pod \"nova-cell1-conductor-0\" (UID: \"cc5eb389-8176-4989-8f45-a7a9631b286b\") " pod="openstack/nova-cell1-conductor-0" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.749937 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Nov 25 21:49:51 crc kubenswrapper[4910]: I1125 21:49:51.916428 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.083357 4910 generic.go:334] "Generic (PLEG): container finished" podID="141c82a2-7ed1-4e1d-aaef-02e471a96029" containerID="964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb" exitCode=0 Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.083427 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.083459 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"141c82a2-7ed1-4e1d-aaef-02e471a96029","Type":"ContainerDied","Data":"964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb"} Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.083532 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"141c82a2-7ed1-4e1d-aaef-02e471a96029","Type":"ContainerDied","Data":"fdcdf50b45d18ef288ae1b019b0dfd2a9f7b523fd209021caaee9c888bba8717"} Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.083567 4910 scope.go:117] "RemoveContainer" containerID="964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.105533 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-combined-ca-bundle\") pod \"141c82a2-7ed1-4e1d-aaef-02e471a96029\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.105907 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kp656\" (UniqueName: \"kubernetes.io/projected/141c82a2-7ed1-4e1d-aaef-02e471a96029-kube-api-access-kp656\") pod \"141c82a2-7ed1-4e1d-aaef-02e471a96029\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.105943 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-config-data\") pod \"141c82a2-7ed1-4e1d-aaef-02e471a96029\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.106321 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/141c82a2-7ed1-4e1d-aaef-02e471a96029-logs\") pod \"141c82a2-7ed1-4e1d-aaef-02e471a96029\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.106395 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-nova-metadata-tls-certs\") pod \"141c82a2-7ed1-4e1d-aaef-02e471a96029\" (UID: \"141c82a2-7ed1-4e1d-aaef-02e471a96029\") " Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.108510 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/141c82a2-7ed1-4e1d-aaef-02e471a96029-logs" (OuterVolumeSpecName: "logs") pod "141c82a2-7ed1-4e1d-aaef-02e471a96029" (UID: "141c82a2-7ed1-4e1d-aaef-02e471a96029"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.112322 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/141c82a2-7ed1-4e1d-aaef-02e471a96029-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.119108 4910 scope.go:117] "RemoveContainer" containerID="cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.128075 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/141c82a2-7ed1-4e1d-aaef-02e471a96029-kube-api-access-kp656" (OuterVolumeSpecName: "kube-api-access-kp656") pod "141c82a2-7ed1-4e1d-aaef-02e471a96029" (UID: "141c82a2-7ed1-4e1d-aaef-02e471a96029"). InnerVolumeSpecName "kube-api-access-kp656". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.140066 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "141c82a2-7ed1-4e1d-aaef-02e471a96029" (UID: "141c82a2-7ed1-4e1d-aaef-02e471a96029"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.140316 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-config-data" (OuterVolumeSpecName: "config-data") pod "141c82a2-7ed1-4e1d-aaef-02e471a96029" (UID: "141c82a2-7ed1-4e1d-aaef-02e471a96029"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.182100 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "141c82a2-7ed1-4e1d-aaef-02e471a96029" (UID: "141c82a2-7ed1-4e1d-aaef-02e471a96029"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.183414 4910 scope.go:117] "RemoveContainer" containerID="964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb" Nov 25 21:49:52 crc kubenswrapper[4910]: E1125 21:49:52.184349 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb\": container with ID starting with 964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb not found: ID does not exist" containerID="964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.184427 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb"} err="failed to get container status \"964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb\": rpc error: code = NotFound desc = could not find container \"964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb\": container with ID starting with 964f7e4d48628f63a32aecc0fe41cb70e4863a7b327a55bfd50bc3b9f2db2efb not found: ID does not exist" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.184461 4910 scope.go:117] "RemoveContainer" containerID="cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c" Nov 25 21:49:52 crc kubenswrapper[4910]: E1125 21:49:52.184852 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c\": container with ID starting with cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c not found: ID does not exist" containerID="cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.184872 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c"} err="failed to get container status \"cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c\": rpc error: code = NotFound desc = could not find container \"cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c\": container with ID starting with cee0e2e0bcf8c848e2f894f1ac53df0117c6641b6a721797d1ef7fac99cb792c not found: ID does not exist" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.214264 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.214334 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kp656\" (UniqueName: \"kubernetes.io/projected/141c82a2-7ed1-4e1d-aaef-02e471a96029-kube-api-access-kp656\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.214349 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.214358 4910 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/141c82a2-7ed1-4e1d-aaef-02e471a96029-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.279563 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 25 21:49:52 crc kubenswrapper[4910]: W1125 21:49:52.281730 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcc5eb389_8176_4989_8f45_a7a9631b286b.slice/crio-b7bb9c0d505e53f19929cf1ee17501c7c591da4b5b6fbf5caa9c87974083b4b9 WatchSource:0}: Error finding container b7bb9c0d505e53f19929cf1ee17501c7c591da4b5b6fbf5caa9c87974083b4b9: Status 404 returned error can't find the container with id b7bb9c0d505e53f19929cf1ee17501c7c591da4b5b6fbf5caa9c87974083b4b9 Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.442738 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:52 crc kubenswrapper[4910]: E1125 21:49:52.445316 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d2f620d772660f1a41f891f22352d87871a684fc4c6c890f81c8dde9db321244" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 21:49:52 crc kubenswrapper[4910]: E1125 21:49:52.448608 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d2f620d772660f1a41f891f22352d87871a684fc4c6c890f81c8dde9db321244" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 21:49:52 crc kubenswrapper[4910]: E1125 21:49:52.450354 4910 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d2f620d772660f1a41f891f22352d87871a684fc4c6c890f81c8dde9db321244" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Nov 25 21:49:52 crc kubenswrapper[4910]: E1125 21:49:52.450532 4910 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="21129bcd-1cf1-4613-84c7-4078cc11738d" containerName="nova-scheduler-scheduler" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.452725 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.486859 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:52 crc kubenswrapper[4910]: E1125 21:49:52.489669 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141c82a2-7ed1-4e1d-aaef-02e471a96029" containerName="nova-metadata-log" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.489914 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="141c82a2-7ed1-4e1d-aaef-02e471a96029" containerName="nova-metadata-log" Nov 25 21:49:52 crc kubenswrapper[4910]: E1125 21:49:52.490016 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="141c82a2-7ed1-4e1d-aaef-02e471a96029" containerName="nova-metadata-metadata" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.490097 4910 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="141c82a2-7ed1-4e1d-aaef-02e471a96029" containerName="nova-metadata-metadata" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.490796 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="141c82a2-7ed1-4e1d-aaef-02e471a96029" containerName="nova-metadata-log" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.490907 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="141c82a2-7ed1-4e1d-aaef-02e471a96029" containerName="nova-metadata-metadata" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.492141 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.495290 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.495615 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.515003 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.627176 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xw2js\" (UniqueName: \"kubernetes.io/projected/c3861cb8-f400-4715-88c0-96371e6f09f9-kube-api-access-xw2js\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.627879 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.627935 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3861cb8-f400-4715-88c0-96371e6f09f9-logs\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.628101 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-config-data\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.628452 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.730870 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xw2js\" (UniqueName: \"kubernetes.io/projected/c3861cb8-f400-4715-88c0-96371e6f09f9-kube-api-access-xw2js\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.731059 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.731103 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3861cb8-f400-4715-88c0-96371e6f09f9-logs\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.731153 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-config-data\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.731229 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.732018 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3861cb8-f400-4715-88c0-96371e6f09f9-logs\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.736458 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.736959 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.738365 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-config-data\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.750520 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xw2js\" (UniqueName: \"kubernetes.io/projected/c3861cb8-f400-4715-88c0-96371e6f09f9-kube-api-access-xw2js\") pod \"nova-metadata-0\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " pod="openstack/nova-metadata-0" Nov 25 21:49:52 crc kubenswrapper[4910]: I1125 21:49:52.824029 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:49:53 crc kubenswrapper[4910]: I1125 21:49:53.096382 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"cc5eb389-8176-4989-8f45-a7a9631b286b","Type":"ContainerStarted","Data":"864005e671973e37e6657dd8b0e502ca0fb257fbbe97bd50fafc3ab53fdfa900"} Nov 25 21:49:53 crc kubenswrapper[4910]: I1125 21:49:53.096934 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"cc5eb389-8176-4989-8f45-a7a9631b286b","Type":"ContainerStarted","Data":"b7bb9c0d505e53f19929cf1ee17501c7c591da4b5b6fbf5caa9c87974083b4b9"} Nov 25 21:49:53 crc kubenswrapper[4910]: I1125 21:49:53.102212 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0" Nov 25 21:49:53 crc kubenswrapper[4910]: I1125 21:49:53.123603 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.123577016 podStartE2EDuration="2.123577016s" podCreationTimestamp="2025-11-25 21:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:49:53.119859768 +0000 UTC m=+1148.582336090" watchObservedRunningTime="2025-11-25 21:49:53.123577016 +0000 UTC m=+1148.586053338" Nov 25 21:49:53 crc kubenswrapper[4910]: I1125 21:49:53.221422 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="141c82a2-7ed1-4e1d-aaef-02e471a96029" path="/var/lib/kubelet/pods/141c82a2-7ed1-4e1d-aaef-02e471a96029/volumes" Nov 25 21:49:53 crc kubenswrapper[4910]: I1125 21:49:53.310036 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:49:53 crc kubenswrapper[4910]: W1125 21:49:53.322633 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc3861cb8_f400_4715_88c0_96371e6f09f9.slice/crio-0af0714eddcc81e4dad636cc7d4d7fa7d4686ebbe98c8b18590959de916d92b0 WatchSource:0}: Error finding container 0af0714eddcc81e4dad636cc7d4d7fa7d4686ebbe98c8b18590959de916d92b0: Status 404 returned error can't find the container with id 0af0714eddcc81e4dad636cc7d4d7fa7d4686ebbe98c8b18590959de916d92b0 Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.114289 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c3861cb8-f400-4715-88c0-96371e6f09f9","Type":"ContainerStarted","Data":"29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e"} Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.115198 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c3861cb8-f400-4715-88c0-96371e6f09f9","Type":"ContainerStarted","Data":"b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7"} Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.115226 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c3861cb8-f400-4715-88c0-96371e6f09f9","Type":"ContainerStarted","Data":"0af0714eddcc81e4dad636cc7d4d7fa7d4686ebbe98c8b18590959de916d92b0"} Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.119043 4910 generic.go:334] "Generic (PLEG): container finished" podID="07059119-a90a-4392-9fba-163e2e8b9078" containerID="3cb3d85836c31eb46d009a9887448ea7da96fa8585d4023ef94906162869ba88" exitCode=0 Nov 25 21:49:54 crc kubenswrapper[4910]: 
I1125 21:49:54.119135 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07059119-a90a-4392-9fba-163e2e8b9078","Type":"ContainerDied","Data":"3cb3d85836c31eb46d009a9887448ea7da96fa8585d4023ef94906162869ba88"} Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.152482 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.152441869 podStartE2EDuration="2.152441869s" podCreationTimestamp="2025-11-25 21:49:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:49:54.132458229 +0000 UTC m=+1149.594934571" watchObservedRunningTime="2025-11-25 21:49:54.152441869 +0000 UTC m=+1149.614918221" Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.218699 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.375165 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-config-data\") pod \"07059119-a90a-4392-9fba-163e2e8b9078\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.375432 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-combined-ca-bundle\") pod \"07059119-a90a-4392-9fba-163e2e8b9078\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.375547 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cj29g\" (UniqueName: \"kubernetes.io/projected/07059119-a90a-4392-9fba-163e2e8b9078-kube-api-access-cj29g\") pod \"07059119-a90a-4392-9fba-163e2e8b9078\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.376125 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07059119-a90a-4392-9fba-163e2e8b9078-logs\") pod \"07059119-a90a-4392-9fba-163e2e8b9078\" (UID: \"07059119-a90a-4392-9fba-163e2e8b9078\") " Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.376576 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07059119-a90a-4392-9fba-163e2e8b9078-logs" (OuterVolumeSpecName: "logs") pod "07059119-a90a-4392-9fba-163e2e8b9078" (UID: "07059119-a90a-4392-9fba-163e2e8b9078"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.377030 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/07059119-a90a-4392-9fba-163e2e8b9078-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.382111 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07059119-a90a-4392-9fba-163e2e8b9078-kube-api-access-cj29g" (OuterVolumeSpecName: "kube-api-access-cj29g") pod "07059119-a90a-4392-9fba-163e2e8b9078" (UID: "07059119-a90a-4392-9fba-163e2e8b9078"). InnerVolumeSpecName "kube-api-access-cj29g". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.404661 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "07059119-a90a-4392-9fba-163e2e8b9078" (UID: "07059119-a90a-4392-9fba-163e2e8b9078"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.415182 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-config-data" (OuterVolumeSpecName: "config-data") pod "07059119-a90a-4392-9fba-163e2e8b9078" (UID: "07059119-a90a-4392-9fba-163e2e8b9078"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.487153 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.487217 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cj29g\" (UniqueName: \"kubernetes.io/projected/07059119-a90a-4392-9fba-163e2e8b9078-kube-api-access-cj29g\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:54 crc kubenswrapper[4910]: I1125 21:49:54.487232 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/07059119-a90a-4392-9fba-163e2e8b9078-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.131803 4910 generic.go:334] "Generic (PLEG): container finished" podID="21129bcd-1cf1-4613-84c7-4078cc11738d" containerID="d2f620d772660f1a41f891f22352d87871a684fc4c6c890f81c8dde9db321244" exitCode=0 Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.131944 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"21129bcd-1cf1-4613-84c7-4078cc11738d","Type":"ContainerDied","Data":"d2f620d772660f1a41f891f22352d87871a684fc4c6c890f81c8dde9db321244"} Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.132434 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"21129bcd-1cf1-4613-84c7-4078cc11738d","Type":"ContainerDied","Data":"ac9f7d55104516910dbdd8524c842fa613f16ee424e044d1ffc95232967a4491"} Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.132463 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac9f7d55104516910dbdd8524c842fa613f16ee424e044d1ffc95232967a4491" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.135327 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"07059119-a90a-4392-9fba-163e2e8b9078","Type":"ContainerDied","Data":"1e5ce33f5f0d34279c4be3cc438c15153c6d9b861779bc62e427ff92a6059d60"} Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.135417 4910 scope.go:117] "RemoveContainer" containerID="3cb3d85836c31eb46d009a9887448ea7da96fa8585d4023ef94906162869ba88" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.135444 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.213950 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.218623 4910 scope.go:117] "RemoveContainer" containerID="4741dc362ac9100fb6a242ed23b67dbcb6fe94d33813f771e153557584e08955" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.255572 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.271850 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.281741 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 21:49:55 crc kubenswrapper[4910]: E1125 21:49:55.282413 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07059119-a90a-4392-9fba-163e2e8b9078" containerName="nova-api-log" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.282430 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="07059119-a90a-4392-9fba-163e2e8b9078" containerName="nova-api-log" Nov 25 21:49:55 crc kubenswrapper[4910]: E1125 21:49:55.282452 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07059119-a90a-4392-9fba-163e2e8b9078" containerName="nova-api-api" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.282460 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="07059119-a90a-4392-9fba-163e2e8b9078" containerName="nova-api-api" Nov 25 21:49:55 crc kubenswrapper[4910]: E1125 21:49:55.282479 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21129bcd-1cf1-4613-84c7-4078cc11738d" containerName="nova-scheduler-scheduler" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.282488 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="21129bcd-1cf1-4613-84c7-4078cc11738d" containerName="nova-scheduler-scheduler" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.282744 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="07059119-a90a-4392-9fba-163e2e8b9078" containerName="nova-api-log" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.282758 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="21129bcd-1cf1-4613-84c7-4078cc11738d" containerName="nova-scheduler-scheduler" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.282783 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="07059119-a90a-4392-9fba-163e2e8b9078" containerName="nova-api-api" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.284203 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.292904 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.313116 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.403786 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8x6hx\" (UniqueName: \"kubernetes.io/projected/21129bcd-1cf1-4613-84c7-4078cc11738d-kube-api-access-8x6hx\") pod \"21129bcd-1cf1-4613-84c7-4078cc11738d\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.404060 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-combined-ca-bundle\") pod \"21129bcd-1cf1-4613-84c7-4078cc11738d\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.404120 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-config-data\") pod \"21129bcd-1cf1-4613-84c7-4078cc11738d\" (UID: \"21129bcd-1cf1-4613-84c7-4078cc11738d\") " Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.405330 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-config-data\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.405377 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.405481 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6915039-96f0-4cbb-963f-ed1e3d652b74-logs\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.405675 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvqvg\" (UniqueName: \"kubernetes.io/projected/d6915039-96f0-4cbb-963f-ed1e3d652b74-kube-api-access-kvqvg\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.411048 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21129bcd-1cf1-4613-84c7-4078cc11738d-kube-api-access-8x6hx" (OuterVolumeSpecName: "kube-api-access-8x6hx") pod "21129bcd-1cf1-4613-84c7-4078cc11738d" (UID: "21129bcd-1cf1-4613-84c7-4078cc11738d"). InnerVolumeSpecName "kube-api-access-8x6hx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.434969 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-config-data" (OuterVolumeSpecName: "config-data") pod "21129bcd-1cf1-4613-84c7-4078cc11738d" (UID: "21129bcd-1cf1-4613-84c7-4078cc11738d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.437175 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "21129bcd-1cf1-4613-84c7-4078cc11738d" (UID: "21129bcd-1cf1-4613-84c7-4078cc11738d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.507003 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvqvg\" (UniqueName: \"kubernetes.io/projected/d6915039-96f0-4cbb-963f-ed1e3d652b74-kube-api-access-kvqvg\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.507103 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-config-data\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.507127 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.507178 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6915039-96f0-4cbb-963f-ed1e3d652b74-logs\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.507341 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.507354 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21129bcd-1cf1-4613-84c7-4078cc11738d-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.507366 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8x6hx\" (UniqueName: \"kubernetes.io/projected/21129bcd-1cf1-4613-84c7-4078cc11738d-kube-api-access-8x6hx\") on node \"crc\" DevicePath \"\"" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.507728 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6915039-96f0-4cbb-963f-ed1e3d652b74-logs\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.511947 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-config-data\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.514150 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.536138 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvqvg\" (UniqueName: \"kubernetes.io/projected/d6915039-96f0-4cbb-963f-ed1e3d652b74-kube-api-access-kvqvg\") pod \"nova-api-0\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " pod="openstack/nova-api-0" Nov 25 21:49:55 crc kubenswrapper[4910]: I1125 21:49:55.605264 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.083228 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.152142 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6915039-96f0-4cbb-963f-ed1e3d652b74","Type":"ContainerStarted","Data":"88a17135047f84b572511c727e2c04ba8998712593a653e693a2f4e50ad924cb"} Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.152169 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.247061 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.259013 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.278752 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.280404 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.284785 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.290438 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.428328 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.428815 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6v6t\" (UniqueName: \"kubernetes.io/projected/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-kube-api-access-g6v6t\") pod \"nova-scheduler-0\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.429152 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-config-data\") pod \"nova-scheduler-0\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.532068 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6v6t\" (UniqueName: \"kubernetes.io/projected/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-kube-api-access-g6v6t\") pod \"nova-scheduler-0\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.532235 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-config-data\") pod \"nova-scheduler-0\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.532433 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.538746 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.540070 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-config-data\") pod \"nova-scheduler-0\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.551346 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6v6t\" (UniqueName: 
\"kubernetes.io/projected/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-kube-api-access-g6v6t\") pod \"nova-scheduler-0\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " pod="openstack/nova-scheduler-0" Nov 25 21:49:56 crc kubenswrapper[4910]: I1125 21:49:56.616476 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 21:49:57 crc kubenswrapper[4910]: I1125 21:49:57.139765 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:49:57 crc kubenswrapper[4910]: I1125 21:49:57.166225 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b0a023f7-9af9-4c06-9838-0923ce4cf5a1","Type":"ContainerStarted","Data":"9077b39b63ae50336e97bd082c5c512809325329316c32dc3a1402cc523b304b"} Nov 25 21:49:57 crc kubenswrapper[4910]: I1125 21:49:57.169408 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6915039-96f0-4cbb-963f-ed1e3d652b74","Type":"ContainerStarted","Data":"2cefd9f093938860b5f0f11317e98e898a75fb5eaff6bfb763e277ff3ac362e0"} Nov 25 21:49:57 crc kubenswrapper[4910]: I1125 21:49:57.169511 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6915039-96f0-4cbb-963f-ed1e3d652b74","Type":"ContainerStarted","Data":"d4a3372df3cc1fdffd724ef2234100e51c21780029a0e3b346d72ec04d8d06ae"} Nov 25 21:49:57 crc kubenswrapper[4910]: I1125 21:49:57.195649 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.195627896 podStartE2EDuration="2.195627896s" podCreationTimestamp="2025-11-25 21:49:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:49:57.188323502 +0000 UTC m=+1152.650799824" watchObservedRunningTime="2025-11-25 21:49:57.195627896 +0000 UTC m=+1152.658104218" Nov 25 21:49:57 crc kubenswrapper[4910]: I1125 21:49:57.222733 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07059119-a90a-4392-9fba-163e2e8b9078" path="/var/lib/kubelet/pods/07059119-a90a-4392-9fba-163e2e8b9078/volumes" Nov 25 21:49:57 crc kubenswrapper[4910]: I1125 21:49:57.224510 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21129bcd-1cf1-4613-84c7-4078cc11738d" path="/var/lib/kubelet/pods/21129bcd-1cf1-4613-84c7-4078cc11738d/volumes" Nov 25 21:49:57 crc kubenswrapper[4910]: I1125 21:49:57.824743 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 21:49:57 crc kubenswrapper[4910]: I1125 21:49:57.824857 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 21:49:58 crc kubenswrapper[4910]: I1125 21:49:58.207502 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b0a023f7-9af9-4c06-9838-0923ce4cf5a1","Type":"ContainerStarted","Data":"f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77"} Nov 25 21:49:58 crc kubenswrapper[4910]: I1125 21:49:58.267145 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.267120039 podStartE2EDuration="2.267120039s" podCreationTimestamp="2025-11-25 21:49:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:49:58.255073409 +0000 UTC 
m=+1153.717549811" watchObservedRunningTime="2025-11-25 21:49:58.267120039 +0000 UTC m=+1153.729596371" Nov 25 21:49:59 crc kubenswrapper[4910]: I1125 21:49:59.973902 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 21:50:01 crc kubenswrapper[4910]: I1125 21:50:01.616718 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 21:50:01 crc kubenswrapper[4910]: I1125 21:50:01.797085 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 25 21:50:02 crc kubenswrapper[4910]: I1125 21:50:02.824994 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 21:50:02 crc kubenswrapper[4910]: I1125 21:50:02.825424 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 21:50:03 crc kubenswrapper[4910]: I1125 21:50:03.838488 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.194:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 21:50:03 crc kubenswrapper[4910]: I1125 21:50:03.838488 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.194:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 21:50:04 crc kubenswrapper[4910]: I1125 21:50:04.264176 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 21:50:04 crc kubenswrapper[4910]: I1125 21:50:04.264452 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="28a65abf-02ed-47dd-a7ce-0cc927aac523" containerName="kube-state-metrics" containerID="cri-o://61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132" gracePeriod=30 Nov 25 21:50:04 crc kubenswrapper[4910]: I1125 21:50:04.802033 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 21:50:04 crc kubenswrapper[4910]: I1125 21:50:04.979590 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cnrbm\" (UniqueName: \"kubernetes.io/projected/28a65abf-02ed-47dd-a7ce-0cc927aac523-kube-api-access-cnrbm\") pod \"28a65abf-02ed-47dd-a7ce-0cc927aac523\" (UID: \"28a65abf-02ed-47dd-a7ce-0cc927aac523\") " Nov 25 21:50:04 crc kubenswrapper[4910]: I1125 21:50:04.987737 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28a65abf-02ed-47dd-a7ce-0cc927aac523-kube-api-access-cnrbm" (OuterVolumeSpecName: "kube-api-access-cnrbm") pod "28a65abf-02ed-47dd-a7ce-0cc927aac523" (UID: "28a65abf-02ed-47dd-a7ce-0cc927aac523"). InnerVolumeSpecName "kube-api-access-cnrbm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.082594 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cnrbm\" (UniqueName: \"kubernetes.io/projected/28a65abf-02ed-47dd-a7ce-0cc927aac523-kube-api-access-cnrbm\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.316954 4910 generic.go:334] "Generic (PLEG): container finished" podID="28a65abf-02ed-47dd-a7ce-0cc927aac523" containerID="61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132" exitCode=2 Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.317024 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"28a65abf-02ed-47dd-a7ce-0cc927aac523","Type":"ContainerDied","Data":"61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132"} Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.317075 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"28a65abf-02ed-47dd-a7ce-0cc927aac523","Type":"ContainerDied","Data":"92dc1098b82946f5cca0810ce5172bd72411cb7faeb1a0ca4364e2d0fc9c04a0"} Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.317093 4910 scope.go:117] "RemoveContainer" containerID="61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.317041 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.345962 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.351561 4910 scope.go:117] "RemoveContainer" containerID="61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132" Nov 25 21:50:05 crc kubenswrapper[4910]: E1125 21:50:05.352270 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132\": container with ID starting with 61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132 not found: ID does not exist" containerID="61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.352322 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132"} err="failed to get container status \"61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132\": rpc error: code = NotFound desc = could not find container \"61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132\": container with ID starting with 61fb033907cb5d2113f4769d2932fc67220ab269e1c601f9d062403287425132 not found: ID does not exist" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.360112 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.375157 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 21:50:05 crc kubenswrapper[4910]: E1125 21:50:05.375734 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28a65abf-02ed-47dd-a7ce-0cc927aac523" containerName="kube-state-metrics" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.375758 4910 
state_mem.go:107] "Deleted CPUSet assignment" podUID="28a65abf-02ed-47dd-a7ce-0cc927aac523" containerName="kube-state-metrics" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.375959 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="28a65abf-02ed-47dd-a7ce-0cc927aac523" containerName="kube-state-metrics" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.376731 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.379970 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.380078 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.383741 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.387929 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.387981 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.388002 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4fhc\" (UniqueName: \"kubernetes.io/projected/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-kube-api-access-q4fhc\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.388026 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.489838 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.489901 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.489928 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-q4fhc\" (UniqueName: \"kubernetes.io/projected/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-kube-api-access-q4fhc\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.489955 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.509201 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.509294 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.517934 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.520983 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4fhc\" (UniqueName: \"kubernetes.io/projected/eaec4e2c-bb9a-4c1f-80d5-c93dce82233e-kube-api-access-q4fhc\") pod \"kube-state-metrics-0\" (UID: \"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e\") " pod="openstack/kube-state-metrics-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.605542 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.605969 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 21:50:05 crc kubenswrapper[4910]: I1125 21:50:05.707518 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 25 21:50:06 crc kubenswrapper[4910]: I1125 21:50:06.214647 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 25 21:50:06 crc kubenswrapper[4910]: I1125 21:50:06.303208 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:06 crc kubenswrapper[4910]: I1125 21:50:06.303554 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="ceilometer-central-agent" containerID="cri-o://63f726954dc834029c3701691afaafacd775f92d700ed27908c265c6addf97d6" gracePeriod=30 Nov 25 21:50:06 crc kubenswrapper[4910]: I1125 21:50:06.303667 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="ceilometer-notification-agent" containerID="cri-o://9c9e6b721a2a063bee8d5e70902a93971d4989ad8b50482f6efc7989ae27c276" gracePeriod=30 Nov 25 21:50:06 crc kubenswrapper[4910]: I1125 21:50:06.303742 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="sg-core" containerID="cri-o://f2fbfa0a15dda9fc0c46bf6aa78b8ad7cdb8f6f3ed839307df135fb701a63c0f" gracePeriod=30 Nov 25 21:50:06 crc kubenswrapper[4910]: I1125 21:50:06.303666 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="proxy-httpd" containerID="cri-o://e81fdca7f0f754e4680f00d08defa429b51b16f99db2da0b49b542973eec3507" gracePeriod=30 Nov 25 21:50:06 crc kubenswrapper[4910]: I1125 21:50:06.328953 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e","Type":"ContainerStarted","Data":"cdd4e835f584f3a2ee260753d64e20ad00a03958148bf337a4922b7d853f58ee"} Nov 25 21:50:06 crc kubenswrapper[4910]: I1125 21:50:06.617681 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 21:50:06 crc kubenswrapper[4910]: I1125 21:50:06.659883 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 21:50:06 crc kubenswrapper[4910]: I1125 21:50:06.688496 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.195:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 21:50:06 crc kubenswrapper[4910]: I1125 21:50:06.688860 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.195:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 21:50:07 crc kubenswrapper[4910]: I1125 21:50:07.221732 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28a65abf-02ed-47dd-a7ce-0cc927aac523" path="/var/lib/kubelet/pods/28a65abf-02ed-47dd-a7ce-0cc927aac523/volumes" Nov 25 21:50:07 crc kubenswrapper[4910]: I1125 21:50:07.341792 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" 
event={"ID":"eaec4e2c-bb9a-4c1f-80d5-c93dce82233e","Type":"ContainerStarted","Data":"f8d21daf4bb61823646ae363869685de6c34bf19861d8325f7d8395ac0fde645"} Nov 25 21:50:07 crc kubenswrapper[4910]: I1125 21:50:07.342757 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 25 21:50:07 crc kubenswrapper[4910]: I1125 21:50:07.346907 4910 generic.go:334] "Generic (PLEG): container finished" podID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerID="e81fdca7f0f754e4680f00d08defa429b51b16f99db2da0b49b542973eec3507" exitCode=0 Nov 25 21:50:07 crc kubenswrapper[4910]: I1125 21:50:07.346952 4910 generic.go:334] "Generic (PLEG): container finished" podID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerID="f2fbfa0a15dda9fc0c46bf6aa78b8ad7cdb8f6f3ed839307df135fb701a63c0f" exitCode=2 Nov 25 21:50:07 crc kubenswrapper[4910]: I1125 21:50:07.346966 4910 generic.go:334] "Generic (PLEG): container finished" podID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerID="63f726954dc834029c3701691afaafacd775f92d700ed27908c265c6addf97d6" exitCode=0 Nov 25 21:50:07 crc kubenswrapper[4910]: I1125 21:50:07.347199 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd9a223d-c5ad-44be-8e56-08129fd5b3ba","Type":"ContainerDied","Data":"e81fdca7f0f754e4680f00d08defa429b51b16f99db2da0b49b542973eec3507"} Nov 25 21:50:07 crc kubenswrapper[4910]: I1125 21:50:07.347356 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd9a223d-c5ad-44be-8e56-08129fd5b3ba","Type":"ContainerDied","Data":"f2fbfa0a15dda9fc0c46bf6aa78b8ad7cdb8f6f3ed839307df135fb701a63c0f"} Nov 25 21:50:07 crc kubenswrapper[4910]: I1125 21:50:07.347441 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd9a223d-c5ad-44be-8e56-08129fd5b3ba","Type":"ContainerDied","Data":"63f726954dc834029c3701691afaafacd775f92d700ed27908c265c6addf97d6"} Nov 25 21:50:07 crc kubenswrapper[4910]: I1125 21:50:07.364912 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.013673426 podStartE2EDuration="2.364889836s" podCreationTimestamp="2025-11-25 21:50:05 +0000 UTC" firstStartedPulling="2025-11-25 21:50:06.222068189 +0000 UTC m=+1161.684544551" lastFinishedPulling="2025-11-25 21:50:06.573284639 +0000 UTC m=+1162.035760961" observedRunningTime="2025-11-25 21:50:07.36202866 +0000 UTC m=+1162.824504982" watchObservedRunningTime="2025-11-25 21:50:07.364889836 +0000 UTC m=+1162.827366158" Nov 25 21:50:07 crc kubenswrapper[4910]: I1125 21:50:07.387389 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.388472 4910 generic.go:334] "Generic (PLEG): container finished" podID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerID="9c9e6b721a2a063bee8d5e70902a93971d4989ad8b50482f6efc7989ae27c276" exitCode=0 Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.388550 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd9a223d-c5ad-44be-8e56-08129fd5b3ba","Type":"ContainerDied","Data":"9c9e6b721a2a063bee8d5e70902a93971d4989ad8b50482f6efc7989ae27c276"} Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.389371 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"bd9a223d-c5ad-44be-8e56-08129fd5b3ba","Type":"ContainerDied","Data":"671d8d9774821a8057af57d82e1a1f66b34a358a6a945ebead6fd0332aa81502"} Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.389402 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="671d8d9774821a8057af57d82e1a1f66b34a358a6a945ebead6fd0332aa81502" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.403054 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.527526 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-log-httpd\") pod \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.527968 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-sg-core-conf-yaml\") pod \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.528134 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-combined-ca-bundle\") pod \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.528166 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-run-httpd\") pod \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.528258 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-scripts\") pod \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.528326 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pr2fw\" (UniqueName: \"kubernetes.io/projected/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-kube-api-access-pr2fw\") pod \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.528352 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-config-data\") pod \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\" (UID: \"bd9a223d-c5ad-44be-8e56-08129fd5b3ba\") " Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.531615 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bd9a223d-c5ad-44be-8e56-08129fd5b3ba" (UID: "bd9a223d-c5ad-44be-8e56-08129fd5b3ba"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.534129 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bd9a223d-c5ad-44be-8e56-08129fd5b3ba" (UID: "bd9a223d-c5ad-44be-8e56-08129fd5b3ba"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.540168 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-scripts" (OuterVolumeSpecName: "scripts") pod "bd9a223d-c5ad-44be-8e56-08129fd5b3ba" (UID: "bd9a223d-c5ad-44be-8e56-08129fd5b3ba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.555514 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-kube-api-access-pr2fw" (OuterVolumeSpecName: "kube-api-access-pr2fw") pod "bd9a223d-c5ad-44be-8e56-08129fd5b3ba" (UID: "bd9a223d-c5ad-44be-8e56-08129fd5b3ba"). InnerVolumeSpecName "kube-api-access-pr2fw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.634106 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bd9a223d-c5ad-44be-8e56-08129fd5b3ba" (UID: "bd9a223d-c5ad-44be-8e56-08129fd5b3ba"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.634846 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.634869 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.634881 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.634893 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pr2fw\" (UniqueName: \"kubernetes.io/projected/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-kube-api-access-pr2fw\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.634905 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.663492 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd9a223d-c5ad-44be-8e56-08129fd5b3ba" (UID: "bd9a223d-c5ad-44be-8e56-08129fd5b3ba"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.682901 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-config-data" (OuterVolumeSpecName: "config-data") pod "bd9a223d-c5ad-44be-8e56-08129fd5b3ba" (UID: "bd9a223d-c5ad-44be-8e56-08129fd5b3ba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.737183 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:10 crc kubenswrapper[4910]: I1125 21:50:10.737226 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9a223d-c5ad-44be-8e56-08129fd5b3ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.405754 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.486747 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.507303 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.519583 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:11 crc kubenswrapper[4910]: E1125 21:50:11.520200 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="ceilometer-central-agent" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.520222 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="ceilometer-central-agent" Nov 25 21:50:11 crc kubenswrapper[4910]: E1125 21:50:11.520271 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="proxy-httpd" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.520280 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="proxy-httpd" Nov 25 21:50:11 crc kubenswrapper[4910]: E1125 21:50:11.520291 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="sg-core" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.520303 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="sg-core" Nov 25 21:50:11 crc kubenswrapper[4910]: E1125 21:50:11.520319 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="ceilometer-notification-agent" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.520325 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="ceilometer-notification-agent" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.520568 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="sg-core" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.520603 4910 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="ceilometer-central-agent" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.520616 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="ceilometer-notification-agent" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.520636 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" containerName="proxy-httpd" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.523039 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.526289 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.526635 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.526862 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.544015 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.565051 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-scripts\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.565153 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.565424 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.567794 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-config-data\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.567838 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-run-httpd\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.567870 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsm7r\" (UniqueName: \"kubernetes.io/projected/a46244d0-df94-438c-bb0d-145f6aeed4c0-kube-api-access-xsm7r\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " 
pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.567894 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-log-httpd\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.567936 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.671295 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-scripts\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.671376 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.671459 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.671554 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-config-data\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.671601 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-run-httpd\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.671639 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsm7r\" (UniqueName: \"kubernetes.io/projected/a46244d0-df94-438c-bb0d-145f6aeed4c0-kube-api-access-xsm7r\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.671674 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-log-httpd\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.671719 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: 
\"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.672129 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-run-httpd\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.672719 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-log-httpd\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.679904 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.680086 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.680984 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.684492 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-config-data\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.691963 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-scripts\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.697352 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsm7r\" (UniqueName: \"kubernetes.io/projected/a46244d0-df94-438c-bb0d-145f6aeed4c0-kube-api-access-xsm7r\") pod \"ceilometer-0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " pod="openstack/ceilometer-0" Nov 25 21:50:11 crc kubenswrapper[4910]: I1125 21:50:11.848976 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:12 crc kubenswrapper[4910]: I1125 21:50:12.387731 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:12 crc kubenswrapper[4910]: W1125 21:50:12.394041 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda46244d0_df94_438c_bb0d_145f6aeed4c0.slice/crio-fb5b2417bfee362203edd02a49ba583bfb75cdb3ba1315c0c7f4ae3ed522587e WatchSource:0}: Error finding container fb5b2417bfee362203edd02a49ba583bfb75cdb3ba1315c0c7f4ae3ed522587e: Status 404 returned error can't find the container with id fb5b2417bfee362203edd02a49ba583bfb75cdb3ba1315c0c7f4ae3ed522587e Nov 25 21:50:12 crc kubenswrapper[4910]: I1125 21:50:12.428780 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a46244d0-df94-438c-bb0d-145f6aeed4c0","Type":"ContainerStarted","Data":"fb5b2417bfee362203edd02a49ba583bfb75cdb3ba1315c0c7f4ae3ed522587e"} Nov 25 21:50:12 crc kubenswrapper[4910]: I1125 21:50:12.837420 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 21:50:12 crc kubenswrapper[4910]: I1125 21:50:12.837677 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 21:50:12 crc kubenswrapper[4910]: I1125 21:50:12.846782 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 21:50:12 crc kubenswrapper[4910]: I1125 21:50:12.847134 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.237184 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd9a223d-c5ad-44be-8e56-08129fd5b3ba" path="/var/lib/kubelet/pods/bd9a223d-c5ad-44be-8e56-08129fd5b3ba/volumes" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.351180 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.421099 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sj6rn\" (UniqueName: \"kubernetes.io/projected/d7f40bd9-6226-4bd6-ac74-12d030c150a8-kube-api-access-sj6rn\") pod \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.421285 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-config-data\") pod \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.421334 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-combined-ca-bundle\") pod \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\" (UID: \"d7f40bd9-6226-4bd6-ac74-12d030c150a8\") " Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.433192 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7f40bd9-6226-4bd6-ac74-12d030c150a8-kube-api-access-sj6rn" (OuterVolumeSpecName: "kube-api-access-sj6rn") pod "d7f40bd9-6226-4bd6-ac74-12d030c150a8" (UID: "d7f40bd9-6226-4bd6-ac74-12d030c150a8"). InnerVolumeSpecName "kube-api-access-sj6rn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.456891 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a46244d0-df94-438c-bb0d-145f6aeed4c0","Type":"ContainerStarted","Data":"31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d"} Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.463357 4910 generic.go:334] "Generic (PLEG): container finished" podID="d7f40bd9-6226-4bd6-ac74-12d030c150a8" containerID="a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4" exitCode=137 Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.463482 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.463479 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d7f40bd9-6226-4bd6-ac74-12d030c150a8","Type":"ContainerDied","Data":"a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4"} Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.463557 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d7f40bd9-6226-4bd6-ac74-12d030c150a8","Type":"ContainerDied","Data":"a9211efccab60ec4fdbc4aac1eb87732ff534f1d64edd20367fe8d23ea473dd8"} Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.463581 4910 scope.go:117] "RemoveContainer" containerID="a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.473546 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-config-data" (OuterVolumeSpecName: "config-data") pod "d7f40bd9-6226-4bd6-ac74-12d030c150a8" (UID: "d7f40bd9-6226-4bd6-ac74-12d030c150a8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.496049 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d7f40bd9-6226-4bd6-ac74-12d030c150a8" (UID: "d7f40bd9-6226-4bd6-ac74-12d030c150a8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.524225 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.524301 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d7f40bd9-6226-4bd6-ac74-12d030c150a8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.524315 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sj6rn\" (UniqueName: \"kubernetes.io/projected/d7f40bd9-6226-4bd6-ac74-12d030c150a8-kube-api-access-sj6rn\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.561475 4910 scope.go:117] "RemoveContainer" containerID="a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4" Nov 25 21:50:13 crc kubenswrapper[4910]: E1125 21:50:13.562315 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4\": container with ID starting with a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4 not found: ID does not exist" containerID="a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.562373 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4"} err="failed to get container status \"a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4\": rpc error: code = NotFound desc = could not find container \"a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4\": container with ID starting with a0f30096c54b6bf3575771bfcfc85d375f68b79e59596c85fbc12544841efbe4 not found: ID does not exist" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.806129 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.820018 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.831455 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 21:50:13 crc kubenswrapper[4910]: E1125 21:50:13.832284 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7f40bd9-6226-4bd6-ac74-12d030c150a8" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.832355 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7f40bd9-6226-4bd6-ac74-12d030c150a8" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.832576 4910 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="d7f40bd9-6226-4bd6-ac74-12d030c150a8" containerName="nova-cell1-novncproxy-novncproxy" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.833418 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.838640 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.838925 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.839033 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.856764 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.932917 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.933080 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.933135 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.933414 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vvk9n\" (UniqueName: \"kubernetes.io/projected/af1314f5-7ef2-46dd-b56d-3320375af199-kube-api-access-vvk9n\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:13 crc kubenswrapper[4910]: I1125 21:50:13.933573 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.035199 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vvk9n\" (UniqueName: \"kubernetes.io/projected/af1314f5-7ef2-46dd-b56d-3320375af199-kube-api-access-vvk9n\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.035321 4910 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.035401 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.035456 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.035482 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.040473 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.040617 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.041477 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.041653 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/af1314f5-7ef2-46dd-b56d-3320375af199-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.052630 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vvk9n\" (UniqueName: \"kubernetes.io/projected/af1314f5-7ef2-46dd-b56d-3320375af199-kube-api-access-vvk9n\") pod \"nova-cell1-novncproxy-0\" (UID: \"af1314f5-7ef2-46dd-b56d-3320375af199\") " pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.241029 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.497714 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a46244d0-df94-438c-bb0d-145f6aeed4c0","Type":"ContainerStarted","Data":"3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d"} Nov 25 21:50:14 crc kubenswrapper[4910]: W1125 21:50:14.764383 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf1314f5_7ef2_46dd_b56d_3320375af199.slice/crio-6dab536b70773c6fabafba1e052cda1d18fbda915b8fed198b0b4ad30be3d5bb WatchSource:0}: Error finding container 6dab536b70773c6fabafba1e052cda1d18fbda915b8fed198b0b4ad30be3d5bb: Status 404 returned error can't find the container with id 6dab536b70773c6fabafba1e052cda1d18fbda915b8fed198b0b4ad30be3d5bb Nov 25 21:50:14 crc kubenswrapper[4910]: I1125 21:50:14.765361 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.220093 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7f40bd9-6226-4bd6-ac74-12d030c150a8" path="/var/lib/kubelet/pods/d7f40bd9-6226-4bd6-ac74-12d030c150a8/volumes" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.524382 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"af1314f5-7ef2-46dd-b56d-3320375af199","Type":"ContainerStarted","Data":"b2cfc1fe793f0ba314933e7740c5b89f8980f640205ec3974756fbb88d6f0b97"} Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.524441 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"af1314f5-7ef2-46dd-b56d-3320375af199","Type":"ContainerStarted","Data":"6dab536b70773c6fabafba1e052cda1d18fbda915b8fed198b0b4ad30be3d5bb"} Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.533138 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a46244d0-df94-438c-bb0d-145f6aeed4c0","Type":"ContainerStarted","Data":"74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647"} Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.555540 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.55551566 podStartE2EDuration="2.55551566s" podCreationTimestamp="2025-11-25 21:50:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:50:15.547640871 +0000 UTC m=+1171.010117193" watchObservedRunningTime="2025-11-25 21:50:15.55551566 +0000 UTC m=+1171.017991982" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.609516 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.609596 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.610437 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.610489 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.614556 4910 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.614634 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.747887 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.866979 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-79prd"] Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.869556 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.885031 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-79prd"] Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.991031 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.991289 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.991585 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.991626 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmnx7\" (UniqueName: \"kubernetes.io/projected/2707588d-f101-4cbd-a3cb-cb6366cb0231-kube-api-access-mmnx7\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.991696 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-config\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:15 crc kubenswrapper[4910]: I1125 21:50:15.991790 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.095034 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.095153 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.095224 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.095340 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.095392 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmnx7\" (UniqueName: \"kubernetes.io/projected/2707588d-f101-4cbd-a3cb-cb6366cb0231-kube-api-access-mmnx7\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.095467 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-config\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.097268 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.097749 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-svc\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.097856 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-config\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.098348 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-sb\") pod 
\"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.098538 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-swift-storage-0\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.116754 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmnx7\" (UniqueName: \"kubernetes.io/projected/2707588d-f101-4cbd-a3cb-cb6366cb0231-kube-api-access-mmnx7\") pod \"dnsmasq-dns-5c7b6c5df9-79prd\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.213937 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.552706 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a46244d0-df94-438c-bb0d-145f6aeed4c0","Type":"ContainerStarted","Data":"a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4"} Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.553182 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.611320 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.9357181479999999 podStartE2EDuration="5.611294336s" podCreationTimestamp="2025-11-25 21:50:11 +0000 UTC" firstStartedPulling="2025-11-25 21:50:12.39732394 +0000 UTC m=+1167.859800272" lastFinishedPulling="2025-11-25 21:50:16.072900138 +0000 UTC m=+1171.535376460" observedRunningTime="2025-11-25 21:50:16.596847272 +0000 UTC m=+1172.059323594" watchObservedRunningTime="2025-11-25 21:50:16.611294336 +0000 UTC m=+1172.073770658" Nov 25 21:50:16 crc kubenswrapper[4910]: I1125 21:50:16.799126 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-79prd"] Nov 25 21:50:17 crc kubenswrapper[4910]: I1125 21:50:17.563291 4910 generic.go:334] "Generic (PLEG): container finished" podID="2707588d-f101-4cbd-a3cb-cb6366cb0231" containerID="5421ffe0e09ed20becc521690783484de0df47c749ae407a847e9e196592b824" exitCode=0 Nov 25 21:50:17 crc kubenswrapper[4910]: I1125 21:50:17.563447 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" event={"ID":"2707588d-f101-4cbd-a3cb-cb6366cb0231","Type":"ContainerDied","Data":"5421ffe0e09ed20becc521690783484de0df47c749ae407a847e9e196592b824"} Nov 25 21:50:17 crc kubenswrapper[4910]: I1125 21:50:17.563534 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" event={"ID":"2707588d-f101-4cbd-a3cb-cb6366cb0231","Type":"ContainerStarted","Data":"844fed519bce5af34dd867957570bca138590b67f07186a02eabf054232527ed"} Nov 25 21:50:18 crc kubenswrapper[4910]: I1125 21:50:18.349944 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:18 crc kubenswrapper[4910]: I1125 21:50:18.573812 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" 
event={"ID":"2707588d-f101-4cbd-a3cb-cb6366cb0231","Type":"ContainerStarted","Data":"c2d23beb2a8777214bfa1bf6da8f94e3a83fd1fc95e0f3478400c006ae36c99d"} Nov 25 21:50:18 crc kubenswrapper[4910]: I1125 21:50:18.574216 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="ceilometer-central-agent" containerID="cri-o://31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d" gracePeriod=30 Nov 25 21:50:18 crc kubenswrapper[4910]: I1125 21:50:18.574297 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="ceilometer-notification-agent" containerID="cri-o://3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d" gracePeriod=30 Nov 25 21:50:18 crc kubenswrapper[4910]: I1125 21:50:18.574297 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="sg-core" containerID="cri-o://74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647" gracePeriod=30 Nov 25 21:50:18 crc kubenswrapper[4910]: I1125 21:50:18.574301 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="proxy-httpd" containerID="cri-o://a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4" gracePeriod=30 Nov 25 21:50:18 crc kubenswrapper[4910]: I1125 21:50:18.574407 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:18 crc kubenswrapper[4910]: I1125 21:50:18.614691 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" podStartSLOduration=3.6146662000000003 podStartE2EDuration="3.6146662s" podCreationTimestamp="2025-11-25 21:50:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:50:18.600932495 +0000 UTC m=+1174.063408817" watchObservedRunningTime="2025-11-25 21:50:18.6146662 +0000 UTC m=+1174.077142522" Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.217395 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.218036 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerName="nova-api-log" containerID="cri-o://d4a3372df3cc1fdffd724ef2234100e51c21780029a0e3b346d72ec04d8d06ae" gracePeriod=30 Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.218196 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerName="nova-api-api" containerID="cri-o://2cefd9f093938860b5f0f11317e98e898a75fb5eaff6bfb763e277ff3ac362e0" gracePeriod=30 Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.243081 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.587438 4910 generic.go:334] "Generic (PLEG): container finished" podID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerID="d4a3372df3cc1fdffd724ef2234100e51c21780029a0e3b346d72ec04d8d06ae" 
exitCode=143 Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.587490 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6915039-96f0-4cbb-963f-ed1e3d652b74","Type":"ContainerDied","Data":"d4a3372df3cc1fdffd724ef2234100e51c21780029a0e3b346d72ec04d8d06ae"} Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.590826 4910 generic.go:334] "Generic (PLEG): container finished" podID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerID="a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4" exitCode=0 Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.590851 4910 generic.go:334] "Generic (PLEG): container finished" podID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerID="74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647" exitCode=2 Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.590859 4910 generic.go:334] "Generic (PLEG): container finished" podID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerID="3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d" exitCode=0 Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.590897 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a46244d0-df94-438c-bb0d-145f6aeed4c0","Type":"ContainerDied","Data":"a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4"} Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.590929 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a46244d0-df94-438c-bb0d-145f6aeed4c0","Type":"ContainerDied","Data":"74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647"} Nov 25 21:50:19 crc kubenswrapper[4910]: I1125 21:50:19.590945 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a46244d0-df94-438c-bb0d-145f6aeed4c0","Type":"ContainerDied","Data":"3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d"} Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.098510 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.219894 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-log-httpd\") pod \"a46244d0-df94-438c-bb0d-145f6aeed4c0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.220166 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-combined-ca-bundle\") pod \"a46244d0-df94-438c-bb0d-145f6aeed4c0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.220520 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xsm7r\" (UniqueName: \"kubernetes.io/projected/a46244d0-df94-438c-bb0d-145f6aeed4c0-kube-api-access-xsm7r\") pod \"a46244d0-df94-438c-bb0d-145f6aeed4c0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.221150 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-config-data\") pod \"a46244d0-df94-438c-bb0d-145f6aeed4c0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.221192 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-run-httpd\") pod \"a46244d0-df94-438c-bb0d-145f6aeed4c0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.221203 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a46244d0-df94-438c-bb0d-145f6aeed4c0" (UID: "a46244d0-df94-438c-bb0d-145f6aeed4c0"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.221279 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-sg-core-conf-yaml\") pod \"a46244d0-df94-438c-bb0d-145f6aeed4c0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.221355 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-ceilometer-tls-certs\") pod \"a46244d0-df94-438c-bb0d-145f6aeed4c0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.221424 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-scripts\") pod \"a46244d0-df94-438c-bb0d-145f6aeed4c0\" (UID: \"a46244d0-df94-438c-bb0d-145f6aeed4c0\") " Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.221679 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a46244d0-df94-438c-bb0d-145f6aeed4c0" (UID: "a46244d0-df94-438c-bb0d-145f6aeed4c0"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.222196 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.222214 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a46244d0-df94-438c-bb0d-145f6aeed4c0-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.232036 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-scripts" (OuterVolumeSpecName: "scripts") pod "a46244d0-df94-438c-bb0d-145f6aeed4c0" (UID: "a46244d0-df94-438c-bb0d-145f6aeed4c0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.232153 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a46244d0-df94-438c-bb0d-145f6aeed4c0-kube-api-access-xsm7r" (OuterVolumeSpecName: "kube-api-access-xsm7r") pod "a46244d0-df94-438c-bb0d-145f6aeed4c0" (UID: "a46244d0-df94-438c-bb0d-145f6aeed4c0"). InnerVolumeSpecName "kube-api-access-xsm7r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.250894 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a46244d0-df94-438c-bb0d-145f6aeed4c0" (UID: "a46244d0-df94-438c-bb0d-145f6aeed4c0"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.281458 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "a46244d0-df94-438c-bb0d-145f6aeed4c0" (UID: "a46244d0-df94-438c-bb0d-145f6aeed4c0"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.312304 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a46244d0-df94-438c-bb0d-145f6aeed4c0" (UID: "a46244d0-df94-438c-bb0d-145f6aeed4c0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.323774 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.323816 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.323827 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xsm7r\" (UniqueName: \"kubernetes.io/projected/a46244d0-df94-438c-bb0d-145f6aeed4c0-kube-api-access-xsm7r\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.323840 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.323849 4910 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.334828 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-config-data" (OuterVolumeSpecName: "config-data") pod "a46244d0-df94-438c-bb0d-145f6aeed4c0" (UID: "a46244d0-df94-438c-bb0d-145f6aeed4c0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.427062 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a46244d0-df94-438c-bb0d-145f6aeed4c0-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.632678 4910 generic.go:334] "Generic (PLEG): container finished" podID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerID="31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d" exitCode=0 Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.632750 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.632782 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a46244d0-df94-438c-bb0d-145f6aeed4c0","Type":"ContainerDied","Data":"31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d"} Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.636203 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a46244d0-df94-438c-bb0d-145f6aeed4c0","Type":"ContainerDied","Data":"fb5b2417bfee362203edd02a49ba583bfb75cdb3ba1315c0c7f4ae3ed522587e"} Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.636267 4910 scope.go:117] "RemoveContainer" containerID="a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.661376 4910 scope.go:117] "RemoveContainer" containerID="74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.699348 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.700268 4910 scope.go:117] "RemoveContainer" containerID="3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.708670 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.728632 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:21 crc kubenswrapper[4910]: E1125 21:50:21.729299 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="proxy-httpd" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.729321 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="proxy-httpd" Nov 25 21:50:21 crc kubenswrapper[4910]: E1125 21:50:21.729341 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="ceilometer-central-agent" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.729347 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="ceilometer-central-agent" Nov 25 21:50:21 crc kubenswrapper[4910]: E1125 21:50:21.729359 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="ceilometer-notification-agent" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.729368 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="ceilometer-notification-agent" Nov 25 21:50:21 crc kubenswrapper[4910]: E1125 21:50:21.729382 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="sg-core" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.729390 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="sg-core" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.729589 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="proxy-httpd" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.729602 4910 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="ceilometer-central-agent" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.729610 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="sg-core" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.729647 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" containerName="ceilometer-notification-agent" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.731812 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.733986 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.734118 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.735879 4910 scope.go:117] "RemoveContainer" containerID="31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.737427 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.744338 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.804026 4910 scope.go:117] "RemoveContainer" containerID="a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4" Nov 25 21:50:21 crc kubenswrapper[4910]: E1125 21:50:21.804563 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4\": container with ID starting with a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4 not found: ID does not exist" containerID="a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.804595 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4"} err="failed to get container status \"a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4\": rpc error: code = NotFound desc = could not find container \"a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4\": container with ID starting with a03101bdfea657b9f7d14be930f9e015b44a68d11f5768e10e06c5d4479fe4f4 not found: ID does not exist" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.804618 4910 scope.go:117] "RemoveContainer" containerID="74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647" Nov 25 21:50:21 crc kubenswrapper[4910]: E1125 21:50:21.804858 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647\": container with ID starting with 74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647 not found: ID does not exist" containerID="74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.804884 4910 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647"} err="failed to get container status \"74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647\": rpc error: code = NotFound desc = could not find container \"74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647\": container with ID starting with 74d2e1ba97dc893958e4e6a9dda26ea766a36b0c603e6ef6d24c6437da3d8647 not found: ID does not exist" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.804897 4910 scope.go:117] "RemoveContainer" containerID="3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d" Nov 25 21:50:21 crc kubenswrapper[4910]: E1125 21:50:21.805123 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d\": container with ID starting with 3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d not found: ID does not exist" containerID="3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.805146 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d"} err="failed to get container status \"3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d\": rpc error: code = NotFound desc = could not find container \"3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d\": container with ID starting with 3efc5cca8ccf8f5d5f962a7b2ef7b21c161ddd0b9bddf9d236962aa5ac27565d not found: ID does not exist" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.805161 4910 scope.go:117] "RemoveContainer" containerID="31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d" Nov 25 21:50:21 crc kubenswrapper[4910]: E1125 21:50:21.805431 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d\": container with ID starting with 31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d not found: ID does not exist" containerID="31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.805483 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d"} err="failed to get container status \"31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d\": rpc error: code = NotFound desc = could not find container \"31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d\": container with ID starting with 31708d87cdaec4a15456dc30899212fc90be510dfcd17ab052ceb2669ed3575d not found: ID does not exist" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.836883 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.837233 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.837363 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-run-httpd\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.837498 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-config-data\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.837627 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-log-httpd\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.837737 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m8lq5\" (UniqueName: \"kubernetes.io/projected/c46ecefa-aedd-4e29-8209-a584e807bfcb-kube-api-access-m8lq5\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.837823 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.837965 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-scripts\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.940439 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.940544 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.940573 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-run-httpd\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.940642 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-config-data\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.940712 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-log-httpd\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.940811 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m8lq5\" (UniqueName: \"kubernetes.io/projected/c46ecefa-aedd-4e29-8209-a584e807bfcb-kube-api-access-m8lq5\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.940945 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.940985 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-scripts\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.941520 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-log-httpd\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.941525 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-run-httpd\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.945598 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-scripts\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.945679 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.946350 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.946399 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.948175 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-config-data\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.960997 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m8lq5\" (UniqueName: \"kubernetes.io/projected/c46ecefa-aedd-4e29-8209-a584e807bfcb-kube-api-access-m8lq5\") pod \"ceilometer-0\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " pod="openstack/ceilometer-0" Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.998050 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:21 crc kubenswrapper[4910]: I1125 21:50:21.999004 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.448776 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.652153 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46ecefa-aedd-4e29-8209-a584e807bfcb","Type":"ContainerStarted","Data":"b10bba9c73fa26185f1e480b42cb1858f4dd649c04cf92b0a4f443a833ae70b4"} Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.654794 4910 generic.go:334] "Generic (PLEG): container finished" podID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerID="2cefd9f093938860b5f0f11317e98e898a75fb5eaff6bfb763e277ff3ac362e0" exitCode=0 Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.654835 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6915039-96f0-4cbb-963f-ed1e3d652b74","Type":"ContainerDied","Data":"2cefd9f093938860b5f0f11317e98e898a75fb5eaff6bfb763e277ff3ac362e0"} Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.721719 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.765826 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-config-data\") pod \"d6915039-96f0-4cbb-963f-ed1e3d652b74\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.765944 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kvqvg\" (UniqueName: \"kubernetes.io/projected/d6915039-96f0-4cbb-963f-ed1e3d652b74-kube-api-access-kvqvg\") pod \"d6915039-96f0-4cbb-963f-ed1e3d652b74\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.766000 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6915039-96f0-4cbb-963f-ed1e3d652b74-logs\") pod \"d6915039-96f0-4cbb-963f-ed1e3d652b74\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.766395 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-combined-ca-bundle\") pod \"d6915039-96f0-4cbb-963f-ed1e3d652b74\" (UID: \"d6915039-96f0-4cbb-963f-ed1e3d652b74\") " Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.768229 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d6915039-96f0-4cbb-963f-ed1e3d652b74-logs" (OuterVolumeSpecName: "logs") pod "d6915039-96f0-4cbb-963f-ed1e3d652b74" (UID: "d6915039-96f0-4cbb-963f-ed1e3d652b74"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.774526 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d6915039-96f0-4cbb-963f-ed1e3d652b74-kube-api-access-kvqvg" (OuterVolumeSpecName: "kube-api-access-kvqvg") pod "d6915039-96f0-4cbb-963f-ed1e3d652b74" (UID: "d6915039-96f0-4cbb-963f-ed1e3d652b74"). InnerVolumeSpecName "kube-api-access-kvqvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.813805 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d6915039-96f0-4cbb-963f-ed1e3d652b74" (UID: "d6915039-96f0-4cbb-963f-ed1e3d652b74"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.825174 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-config-data" (OuterVolumeSpecName: "config-data") pod "d6915039-96f0-4cbb-963f-ed1e3d652b74" (UID: "d6915039-96f0-4cbb-963f-ed1e3d652b74"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.869573 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kvqvg\" (UniqueName: \"kubernetes.io/projected/d6915039-96f0-4cbb-963f-ed1e3d652b74-kube-api-access-kvqvg\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.869612 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d6915039-96f0-4cbb-963f-ed1e3d652b74-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.869622 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:22 crc kubenswrapper[4910]: I1125 21:50:22.869631 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d6915039-96f0-4cbb-963f-ed1e3d652b74-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.216827 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a46244d0-df94-438c-bb0d-145f6aeed4c0" path="/var/lib/kubelet/pods/a46244d0-df94-438c-bb0d-145f6aeed4c0/volumes" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.666200 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46ecefa-aedd-4e29-8209-a584e807bfcb","Type":"ContainerStarted","Data":"f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6"} Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.669631 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d6915039-96f0-4cbb-963f-ed1e3d652b74","Type":"ContainerDied","Data":"88a17135047f84b572511c727e2c04ba8998712593a653e693a2f4e50ad924cb"} Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.669686 4910 scope.go:117] "RemoveContainer" containerID="2cefd9f093938860b5f0f11317e98e898a75fb5eaff6bfb763e277ff3ac362e0" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.669735 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.739382 4910 scope.go:117] "RemoveContainer" containerID="d4a3372df3cc1fdffd724ef2234100e51c21780029a0e3b346d72ec04d8d06ae" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.762495 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.782365 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.793735 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:23 crc kubenswrapper[4910]: E1125 21:50:23.794416 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerName="nova-api-log" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.794508 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerName="nova-api-log" Nov 25 21:50:23 crc kubenswrapper[4910]: E1125 21:50:23.794617 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerName="nova-api-api" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.794705 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerName="nova-api-api" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.795049 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerName="nova-api-api" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.795148 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d6915039-96f0-4cbb-963f-ed1e3d652b74" containerName="nova-api-log" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.796484 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.799541 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.799615 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.799715 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.801475 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.899643 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.899724 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.899769 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-public-tls-certs\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.899812 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mtx8\" (UniqueName: \"kubernetes.io/projected/bede3e63-0748-4865-a2b0-61fddcbd3291-kube-api-access-2mtx8\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.899866 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bede3e63-0748-4865-a2b0-61fddcbd3291-logs\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:23 crc kubenswrapper[4910]: I1125 21:50:23.899905 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-config-data\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.002087 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.002149 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-internal-tls-certs\") pod 
\"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.002188 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-public-tls-certs\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.002225 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mtx8\" (UniqueName: \"kubernetes.io/projected/bede3e63-0748-4865-a2b0-61fddcbd3291-kube-api-access-2mtx8\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.002285 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bede3e63-0748-4865-a2b0-61fddcbd3291-logs\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.002310 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-config-data\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.003701 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bede3e63-0748-4865-a2b0-61fddcbd3291-logs\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.006799 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-config-data\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.006794 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-internal-tls-certs\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.007585 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-public-tls-certs\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.015865 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.032904 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mtx8\" (UniqueName: \"kubernetes.io/projected/bede3e63-0748-4865-a2b0-61fddcbd3291-kube-api-access-2mtx8\") pod \"nova-api-0\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " 
pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.120897 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.243604 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.266603 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.578743 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:24 crc kubenswrapper[4910]: W1125 21:50:24.590661 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbede3e63_0748_4865_a2b0_61fddcbd3291.slice/crio-f7753d45b045a23e21a7c03156262a5382f7320f8ff51d770497cee45c0d0d72 WatchSource:0}: Error finding container f7753d45b045a23e21a7c03156262a5382f7320f8ff51d770497cee45c0d0d72: Status 404 returned error can't find the container with id f7753d45b045a23e21a7c03156262a5382f7320f8ff51d770497cee45c0d0d72 Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.692555 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46ecefa-aedd-4e29-8209-a584e807bfcb","Type":"ContainerStarted","Data":"a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9"} Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.692624 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46ecefa-aedd-4e29-8209-a584e807bfcb","Type":"ContainerStarted","Data":"b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e"} Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.694689 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bede3e63-0748-4865-a2b0-61fddcbd3291","Type":"ContainerStarted","Data":"f7753d45b045a23e21a7c03156262a5382f7320f8ff51d770497cee45c0d0d72"} Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.720705 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.903655 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-tm28z"] Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.905117 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.908041 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.908274 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.926838 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-scripts\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.926981 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.927054 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cq682\" (UniqueName: \"kubernetes.io/projected/dd9248e6-abed-48da-948e-3cf59171c0e7-kube-api-access-cq682\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.927110 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-config-data\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:24 crc kubenswrapper[4910]: I1125 21:50:24.932559 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-tm28z"] Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.028984 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-scripts\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.030938 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.031314 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cq682\" (UniqueName: \"kubernetes.io/projected/dd9248e6-abed-48da-948e-3cf59171c0e7-kube-api-access-cq682\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.031444 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-config-data\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.036484 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-scripts\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.040697 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.052396 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-config-data\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.057411 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cq682\" (UniqueName: \"kubernetes.io/projected/dd9248e6-abed-48da-948e-3cf59171c0e7-kube-api-access-cq682\") pod \"nova-cell1-cell-mapping-tm28z\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.225065 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d6915039-96f0-4cbb-963f-ed1e3d652b74" path="/var/lib/kubelet/pods/d6915039-96f0-4cbb-963f-ed1e3d652b74/volumes" Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.260882 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.773588 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bede3e63-0748-4865-a2b0-61fddcbd3291","Type":"ContainerStarted","Data":"5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0"} Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.773668 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bede3e63-0748-4865-a2b0-61fddcbd3291","Type":"ContainerStarted","Data":"51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf"} Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.786016 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-tm28z"] Nov 25 21:50:25 crc kubenswrapper[4910]: I1125 21:50:25.800788 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.800757606 podStartE2EDuration="2.800757606s" podCreationTimestamp="2025-11-25 21:50:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:50:25.798999239 +0000 UTC m=+1181.261475551" watchObservedRunningTime="2025-11-25 21:50:25.800757606 +0000 UTC m=+1181.263233928" Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.216425 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.307603 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-qcxp8"] Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.308806 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" podUID="6c213fd9-da6e-4244-b118-c038c8b70341" containerName="dnsmasq-dns" containerID="cri-o://f21ed05aacbe49a3c5fc46f00d4e5c8aa1187e383b9a3799560a46104bc56a48" gracePeriod=10 Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.789414 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46ecefa-aedd-4e29-8209-a584e807bfcb","Type":"ContainerStarted","Data":"dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244"} Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.789651 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="ceilometer-central-agent" containerID="cri-o://f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6" gracePeriod=30 Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.789742 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.790085 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="proxy-httpd" containerID="cri-o://dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244" gracePeriod=30 Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.790147 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="sg-core" 
containerID="cri-o://a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9" gracePeriod=30 Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.790188 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="ceilometer-notification-agent" containerID="cri-o://b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e" gracePeriod=30 Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.798122 4910 generic.go:334] "Generic (PLEG): container finished" podID="6c213fd9-da6e-4244-b118-c038c8b70341" containerID="f21ed05aacbe49a3c5fc46f00d4e5c8aa1187e383b9a3799560a46104bc56a48" exitCode=0 Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.798203 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" event={"ID":"6c213fd9-da6e-4244-b118-c038c8b70341","Type":"ContainerDied","Data":"f21ed05aacbe49a3c5fc46f00d4e5c8aa1187e383b9a3799560a46104bc56a48"} Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.801085 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-tm28z" event={"ID":"dd9248e6-abed-48da-948e-3cf59171c0e7","Type":"ContainerStarted","Data":"3dad5fd9f70dee45b08e55b77d3c1230c80814ceb669e62513c208e736150bc9"} Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.801116 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-tm28z" event={"ID":"dd9248e6-abed-48da-948e-3cf59171c0e7","Type":"ContainerStarted","Data":"5434dee8b103530027ddd1d71053bb2aa0ac888e6cfa7c1034343240317f8e1b"} Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.822300 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.811211369 podStartE2EDuration="5.822238832s" podCreationTimestamp="2025-11-25 21:50:21 +0000 UTC" firstStartedPulling="2025-11-25 21:50:22.464107842 +0000 UTC m=+1177.926584174" lastFinishedPulling="2025-11-25 21:50:25.475135315 +0000 UTC m=+1180.937611637" observedRunningTime="2025-11-25 21:50:26.814953729 +0000 UTC m=+1182.277430061" watchObservedRunningTime="2025-11-25 21:50:26.822238832 +0000 UTC m=+1182.284715154" Nov 25 21:50:26 crc kubenswrapper[4910]: I1125 21:50:26.846447 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-tm28z" podStartSLOduration=2.846419384 podStartE2EDuration="2.846419384s" podCreationTimestamp="2025-11-25 21:50:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:50:26.834741634 +0000 UTC m=+1182.297217956" watchObservedRunningTime="2025-11-25 21:50:26.846419384 +0000 UTC m=+1182.308895706" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.080675 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.098691 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2pdls\" (UniqueName: \"kubernetes.io/projected/6c213fd9-da6e-4244-b118-c038c8b70341-kube-api-access-2pdls\") pod \"6c213fd9-da6e-4244-b118-c038c8b70341\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.098752 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-sb\") pod \"6c213fd9-da6e-4244-b118-c038c8b70341\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.098778 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-swift-storage-0\") pod \"6c213fd9-da6e-4244-b118-c038c8b70341\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.098966 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-svc\") pod \"6c213fd9-da6e-4244-b118-c038c8b70341\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.099042 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-config\") pod \"6c213fd9-da6e-4244-b118-c038c8b70341\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.099232 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-nb\") pod \"6c213fd9-da6e-4244-b118-c038c8b70341\" (UID: \"6c213fd9-da6e-4244-b118-c038c8b70341\") " Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.150081 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c213fd9-da6e-4244-b118-c038c8b70341-kube-api-access-2pdls" (OuterVolumeSpecName: "kube-api-access-2pdls") pod "6c213fd9-da6e-4244-b118-c038c8b70341" (UID: "6c213fd9-da6e-4244-b118-c038c8b70341"). InnerVolumeSpecName "kube-api-access-2pdls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.180332 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-config" (OuterVolumeSpecName: "config") pod "6c213fd9-da6e-4244-b118-c038c8b70341" (UID: "6c213fd9-da6e-4244-b118-c038c8b70341"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.204764 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.204803 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2pdls\" (UniqueName: \"kubernetes.io/projected/6c213fd9-da6e-4244-b118-c038c8b70341-kube-api-access-2pdls\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.221260 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6c213fd9-da6e-4244-b118-c038c8b70341" (UID: "6c213fd9-da6e-4244-b118-c038c8b70341"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.223793 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6c213fd9-da6e-4244-b118-c038c8b70341" (UID: "6c213fd9-da6e-4244-b118-c038c8b70341"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.230572 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6c213fd9-da6e-4244-b118-c038c8b70341" (UID: "6c213fd9-da6e-4244-b118-c038c8b70341"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.231736 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6c213fd9-da6e-4244-b118-c038c8b70341" (UID: "6c213fd9-da6e-4244-b118-c038c8b70341"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.306789 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.306831 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.306844 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.306853 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6c213fd9-da6e-4244-b118-c038c8b70341-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.819310 4910 generic.go:334] "Generic (PLEG): container finished" podID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerID="dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244" exitCode=0 Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.820293 4910 generic.go:334] "Generic (PLEG): container finished" podID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerID="a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9" exitCode=2 Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.820371 4910 generic.go:334] "Generic (PLEG): container finished" podID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerID="b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e" exitCode=0 Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.819409 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46ecefa-aedd-4e29-8209-a584e807bfcb","Type":"ContainerDied","Data":"dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244"} Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.820559 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46ecefa-aedd-4e29-8209-a584e807bfcb","Type":"ContainerDied","Data":"a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9"} Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.820644 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46ecefa-aedd-4e29-8209-a584e807bfcb","Type":"ContainerDied","Data":"b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e"} Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.823777 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.823879 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-865f5d856f-qcxp8" event={"ID":"6c213fd9-da6e-4244-b118-c038c8b70341","Type":"ContainerDied","Data":"bf1aad104e6035a2f003758f5f0269b588ab07f93f8710a505cbd0fc06031eb0"} Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.824055 4910 scope.go:117] "RemoveContainer" containerID="f21ed05aacbe49a3c5fc46f00d4e5c8aa1187e383b9a3799560a46104bc56a48" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.853405 4910 scope.go:117] "RemoveContainer" containerID="6c093136edfb85328a19d6cec35d7d75459ab93afed95bc361f1226185dd3af9" Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.875712 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-qcxp8"] Nov 25 21:50:27 crc kubenswrapper[4910]: I1125 21:50:27.889750 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-865f5d856f-qcxp8"] Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.224081 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c213fd9-da6e-4244-b118-c038c8b70341" path="/var/lib/kubelet/pods/6c213fd9-da6e-4244-b118-c038c8b70341/volumes" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.733083 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.860545 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m8lq5\" (UniqueName: \"kubernetes.io/projected/c46ecefa-aedd-4e29-8209-a584e807bfcb-kube-api-access-m8lq5\") pod \"c46ecefa-aedd-4e29-8209-a584e807bfcb\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.860665 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-ceilometer-tls-certs\") pod \"c46ecefa-aedd-4e29-8209-a584e807bfcb\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.860724 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-combined-ca-bundle\") pod \"c46ecefa-aedd-4e29-8209-a584e807bfcb\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.860810 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-scripts\") pod \"c46ecefa-aedd-4e29-8209-a584e807bfcb\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.860840 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-sg-core-conf-yaml\") pod \"c46ecefa-aedd-4e29-8209-a584e807bfcb\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.860903 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-run-httpd\") pod 
\"c46ecefa-aedd-4e29-8209-a584e807bfcb\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.860957 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-config-data\") pod \"c46ecefa-aedd-4e29-8209-a584e807bfcb\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.860992 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-log-httpd\") pod \"c46ecefa-aedd-4e29-8209-a584e807bfcb\" (UID: \"c46ecefa-aedd-4e29-8209-a584e807bfcb\") " Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.861881 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c46ecefa-aedd-4e29-8209-a584e807bfcb" (UID: "c46ecefa-aedd-4e29-8209-a584e807bfcb"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.861929 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c46ecefa-aedd-4e29-8209-a584e807bfcb" (UID: "c46ecefa-aedd-4e29-8209-a584e807bfcb"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.867939 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-scripts" (OuterVolumeSpecName: "scripts") pod "c46ecefa-aedd-4e29-8209-a584e807bfcb" (UID: "c46ecefa-aedd-4e29-8209-a584e807bfcb"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.868472 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c46ecefa-aedd-4e29-8209-a584e807bfcb-kube-api-access-m8lq5" (OuterVolumeSpecName: "kube-api-access-m8lq5") pod "c46ecefa-aedd-4e29-8209-a584e807bfcb" (UID: "c46ecefa-aedd-4e29-8209-a584e807bfcb"). InnerVolumeSpecName "kube-api-access-m8lq5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.893103 4910 generic.go:334] "Generic (PLEG): container finished" podID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerID="f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6" exitCode=0 Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.893193 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.893219 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46ecefa-aedd-4e29-8209-a584e807bfcb","Type":"ContainerDied","Data":"f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6"} Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.893404 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c46ecefa-aedd-4e29-8209-a584e807bfcb","Type":"ContainerDied","Data":"b10bba9c73fa26185f1e480b42cb1858f4dd649c04cf92b0a4f443a833ae70b4"} Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.893472 4910 scope.go:117] "RemoveContainer" containerID="dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.912763 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c46ecefa-aedd-4e29-8209-a584e807bfcb" (UID: "c46ecefa-aedd-4e29-8209-a584e807bfcb"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.921684 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "c46ecefa-aedd-4e29-8209-a584e807bfcb" (UID: "c46ecefa-aedd-4e29-8209-a584e807bfcb"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.963305 4910 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.963468 4910 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c46ecefa-aedd-4e29-8209-a584e807bfcb-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.963548 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m8lq5\" (UniqueName: \"kubernetes.io/projected/c46ecefa-aedd-4e29-8209-a584e807bfcb-kube-api-access-m8lq5\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.963643 4910 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.963717 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.963781 4910 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.979023 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-combined-ca-bundle" 
(OuterVolumeSpecName: "combined-ca-bundle") pod "c46ecefa-aedd-4e29-8209-a584e807bfcb" (UID: "c46ecefa-aedd-4e29-8209-a584e807bfcb"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.980166 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-config-data" (OuterVolumeSpecName: "config-data") pod "c46ecefa-aedd-4e29-8209-a584e807bfcb" (UID: "c46ecefa-aedd-4e29-8209-a584e807bfcb"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:29 crc kubenswrapper[4910]: I1125 21:50:29.987180 4910 scope.go:117] "RemoveContainer" containerID="a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.013133 4910 scope.go:117] "RemoveContainer" containerID="b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.038742 4910 scope.go:117] "RemoveContainer" containerID="f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.062747 4910 scope.go:117] "RemoveContainer" containerID="dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244" Nov 25 21:50:30 crc kubenswrapper[4910]: E1125 21:50:30.063766 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244\": container with ID starting with dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244 not found: ID does not exist" containerID="dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.063823 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244"} err="failed to get container status \"dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244\": rpc error: code = NotFound desc = could not find container \"dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244\": container with ID starting with dbf2155701bb5ddb9d1aaa5c4f19faae3e02ad65f8d0875d24b85a52af7fa244 not found: ID does not exist" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.063858 4910 scope.go:117] "RemoveContainer" containerID="a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9" Nov 25 21:50:30 crc kubenswrapper[4910]: E1125 21:50:30.064549 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9\": container with ID starting with a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9 not found: ID does not exist" containerID="a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.064598 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9"} err="failed to get container status \"a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9\": rpc error: code = NotFound desc = could not find container \"a616a1dfdd96eb898135f6b0dd2654123cffa0194411868f7f9de6525e19edd9\": container with 
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.064618 4910 scope.go:117] "RemoveContainer" containerID="b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.065129 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.065149 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c46ecefa-aedd-4e29-8209-a584e807bfcb-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 21:50:30 crc kubenswrapper[4910]: E1125 21:50:30.065139 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e\": container with ID starting with b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e not found: ID does not exist" containerID="b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.065212 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e"} err="failed to get container status \"b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e\": rpc error: code = NotFound desc = could not find container \"b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e\": container with ID starting with b19d9a8203a344e3092c7ab63c58edbaaf5dba58db274192a9a37a1fe5ceb17e not found: ID does not exist"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.065284 4910 scope.go:117] "RemoveContainer" containerID="f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6"
Nov 25 21:50:30 crc kubenswrapper[4910]: E1125 21:50:30.065631 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6\": container with ID starting with f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6 not found: ID does not exist" containerID="f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.065669 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6"} err="failed to get container status \"f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6\": rpc error: code = NotFound desc = could not find container \"f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6\": container with ID starting with f5505e64f7dfbfff65653a7e541748a0b19c95991f2e8ce3c0b58644f3fb4be6 not found: ID does not exist"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.249621 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.258195 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.287501 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 25 21:50:30 crc kubenswrapper[4910]: E1125 21:50:30.289817 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="ceilometer-central-agent"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.289896 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="ceilometer-central-agent"
Nov 25 21:50:30 crc kubenswrapper[4910]: E1125 21:50:30.289968 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c213fd9-da6e-4244-b118-c038c8b70341" containerName="dnsmasq-dns"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.290020 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c213fd9-da6e-4244-b118-c038c8b70341" containerName="dnsmasq-dns"
Nov 25 21:50:30 crc kubenswrapper[4910]: E1125 21:50:30.290093 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="sg-core"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.290147 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="sg-core"
Nov 25 21:50:30 crc kubenswrapper[4910]: E1125 21:50:30.290229 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="proxy-httpd"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.290307 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="proxy-httpd"
Nov 25 21:50:30 crc kubenswrapper[4910]: E1125 21:50:30.290707 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c213fd9-da6e-4244-b118-c038c8b70341" containerName="init"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.290795 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c213fd9-da6e-4244-b118-c038c8b70341" containerName="init"
Nov 25 21:50:30 crc kubenswrapper[4910]: E1125 21:50:30.290880 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="ceilometer-notification-agent"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.290935 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="ceilometer-notification-agent"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.291170 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="ceilometer-notification-agent"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.291266 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c213fd9-da6e-4244-b118-c038c8b70341" containerName="dnsmasq-dns"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.291333 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="ceilometer-central-agent"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.292605 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="proxy-httpd"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.292679 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" containerName="sg-core"
Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.295054 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.301163 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.301412 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.301583 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.316797 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.478898 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.478996 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-scripts\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.479033 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.479069 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-run-httpd\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.479225 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-log-httpd\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.479312 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.479345 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ltv5k\" (UniqueName: \"kubernetes.io/projected/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-kube-api-access-ltv5k\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.479531 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-config-data\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.581500 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-config-data\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.581641 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.581678 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-scripts\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.581698 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.581721 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-run-httpd\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.581777 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-log-httpd\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.581804 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.581827 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ltv5k\" (UniqueName: \"kubernetes.io/projected/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-kube-api-access-ltv5k\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.582731 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-run-httpd\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.582957 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-log-httpd\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.586779 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-config-data\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.588388 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.588445 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-scripts\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.588918 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.589705 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.609567 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ltv5k\" (UniqueName: \"kubernetes.io/projected/f8fbe2b4-66f6-440d-8cdd-04534f6069ad-kube-api-access-ltv5k\") pod \"ceilometer-0\" (UID: \"f8fbe2b4-66f6-440d-8cdd-04534f6069ad\") " pod="openstack/ceilometer-0" Nov 25 21:50:30 crc kubenswrapper[4910]: I1125 21:50:30.616003 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 25 21:50:31 crc kubenswrapper[4910]: I1125 21:50:31.101955 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 25 21:50:31 crc kubenswrapper[4910]: W1125 21:50:31.103200 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf8fbe2b4_66f6_440d_8cdd_04534f6069ad.slice/crio-899503bc960fae91fcd25e274b2bee67fd0022494ff04dee829c2ed0e0244c25 WatchSource:0}: Error finding container 899503bc960fae91fcd25e274b2bee67fd0022494ff04dee829c2ed0e0244c25: Status 404 returned error can't find the container with id 899503bc960fae91fcd25e274b2bee67fd0022494ff04dee829c2ed0e0244c25 Nov 25 21:50:31 crc kubenswrapper[4910]: I1125 21:50:31.106285 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 21:50:31 crc kubenswrapper[4910]: I1125 21:50:31.219449 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c46ecefa-aedd-4e29-8209-a584e807bfcb" path="/var/lib/kubelet/pods/c46ecefa-aedd-4e29-8209-a584e807bfcb/volumes" Nov 25 21:50:31 crc kubenswrapper[4910]: I1125 21:50:31.925162 4910 generic.go:334] "Generic (PLEG): container finished" podID="dd9248e6-abed-48da-948e-3cf59171c0e7" containerID="3dad5fd9f70dee45b08e55b77d3c1230c80814ceb669e62513c208e736150bc9" exitCode=0 Nov 25 21:50:31 crc kubenswrapper[4910]: I1125 21:50:31.925475 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-tm28z" event={"ID":"dd9248e6-abed-48da-948e-3cf59171c0e7","Type":"ContainerDied","Data":"3dad5fd9f70dee45b08e55b77d3c1230c80814ceb669e62513c208e736150bc9"} Nov 25 21:50:31 crc kubenswrapper[4910]: I1125 21:50:31.933571 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fbe2b4-66f6-440d-8cdd-04534f6069ad","Type":"ContainerStarted","Data":"cdcfba193dd5236715f6d9076e90e91d5b9ba06a1fd02b6363d55ce98a0aa7d8"} Nov 25 21:50:31 crc kubenswrapper[4910]: I1125 21:50:31.933853 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fbe2b4-66f6-440d-8cdd-04534f6069ad","Type":"ContainerStarted","Data":"899503bc960fae91fcd25e274b2bee67fd0022494ff04dee829c2ed0e0244c25"} Nov 25 21:50:32 crc kubenswrapper[4910]: I1125 21:50:32.958332 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fbe2b4-66f6-440d-8cdd-04534f6069ad","Type":"ContainerStarted","Data":"152ef641d273d7d25c211530c8a82ba7351995e8869afcf8cab8e253e49076eb"} Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.360218 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.456144 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cq682\" (UniqueName: \"kubernetes.io/projected/dd9248e6-abed-48da-948e-3cf59171c0e7-kube-api-access-cq682\") pod \"dd9248e6-abed-48da-948e-3cf59171c0e7\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.456236 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-scripts\") pod \"dd9248e6-abed-48da-948e-3cf59171c0e7\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.456299 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-config-data\") pod \"dd9248e6-abed-48da-948e-3cf59171c0e7\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.456405 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-combined-ca-bundle\") pod \"dd9248e6-abed-48da-948e-3cf59171c0e7\" (UID: \"dd9248e6-abed-48da-948e-3cf59171c0e7\") " Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.462465 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-scripts" (OuterVolumeSpecName: "scripts") pod "dd9248e6-abed-48da-948e-3cf59171c0e7" (UID: "dd9248e6-abed-48da-948e-3cf59171c0e7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.462540 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd9248e6-abed-48da-948e-3cf59171c0e7-kube-api-access-cq682" (OuterVolumeSpecName: "kube-api-access-cq682") pod "dd9248e6-abed-48da-948e-3cf59171c0e7" (UID: "dd9248e6-abed-48da-948e-3cf59171c0e7"). InnerVolumeSpecName "kube-api-access-cq682". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.484639 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-config-data" (OuterVolumeSpecName: "config-data") pod "dd9248e6-abed-48da-948e-3cf59171c0e7" (UID: "dd9248e6-abed-48da-948e-3cf59171c0e7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.485132 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dd9248e6-abed-48da-948e-3cf59171c0e7" (UID: "dd9248e6-abed-48da-948e-3cf59171c0e7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.559202 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cq682\" (UniqueName: \"kubernetes.io/projected/dd9248e6-abed-48da-948e-3cf59171c0e7-kube-api-access-cq682\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.559267 4910 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-scripts\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.559281 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.559290 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd9248e6-abed-48da-948e-3cf59171c0e7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.969209 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-tm28z" event={"ID":"dd9248e6-abed-48da-948e-3cf59171c0e7","Type":"ContainerDied","Data":"5434dee8b103530027ddd1d71053bb2aa0ac888e6cfa7c1034343240317f8e1b"} Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.969625 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5434dee8b103530027ddd1d71053bb2aa0ac888e6cfa7c1034343240317f8e1b" Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.969295 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-tm28z" Nov 25 21:50:33 crc kubenswrapper[4910]: I1125 21:50:33.972446 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fbe2b4-66f6-440d-8cdd-04534f6069ad","Type":"ContainerStarted","Data":"950e3f7b54f7bb59624d32ecedd042050fef1d3cfdf073d9bf30aed8eb29dbba"} Nov 25 21:50:34 crc kubenswrapper[4910]: I1125 21:50:34.121136 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 21:50:34 crc kubenswrapper[4910]: I1125 21:50:34.121211 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 21:50:34 crc kubenswrapper[4910]: I1125 21:50:34.136022 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:34 crc kubenswrapper[4910]: I1125 21:50:34.154741 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:50:34 crc kubenswrapper[4910]: I1125 21:50:34.155307 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="b0a023f7-9af9-4c06-9838-0923ce4cf5a1" containerName="nova-scheduler-scheduler" containerID="cri-o://f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77" gracePeriod=30 Nov 25 21:50:34 crc kubenswrapper[4910]: I1125 21:50:34.177312 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:50:34 crc kubenswrapper[4910]: I1125 21:50:34.177619 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerName="nova-metadata-log" 
containerID="cri-o://b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7" gracePeriod=30 Nov 25 21:50:34 crc kubenswrapper[4910]: I1125 21:50:34.178308 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerName="nova-metadata-metadata" containerID="cri-o://29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e" gracePeriod=30 Nov 25 21:50:34 crc kubenswrapper[4910]: I1125 21:50:34.991746 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f8fbe2b4-66f6-440d-8cdd-04534f6069ad","Type":"ContainerStarted","Data":"03bef29b40629325b832cb89f670d9c0061ee29e69ac6eb70e4c16ad69eef511"} Nov 25 21:50:34 crc kubenswrapper[4910]: I1125 21:50:34.992321 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 25 21:50:35 crc kubenswrapper[4910]: I1125 21:50:34.999907 4910 generic.go:334] "Generic (PLEG): container finished" podID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerID="b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7" exitCode=143 Nov 25 21:50:35 crc kubenswrapper[4910]: I1125 21:50:34.999981 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c3861cb8-f400-4715-88c0-96371e6f09f9","Type":"ContainerDied","Data":"b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7"} Nov 25 21:50:35 crc kubenswrapper[4910]: I1125 21:50:35.000370 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerName="nova-api-log" containerID="cri-o://51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf" gracePeriod=30 Nov 25 21:50:35 crc kubenswrapper[4910]: I1125 21:50:35.000360 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerName="nova-api-api" containerID="cri-o://5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0" gracePeriod=30 Nov 25 21:50:35 crc kubenswrapper[4910]: I1125 21:50:35.020603 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.8830278489999999 podStartE2EDuration="5.02057939s" podCreationTimestamp="2025-11-25 21:50:30 +0000 UTC" firstStartedPulling="2025-11-25 21:50:31.106003699 +0000 UTC m=+1186.568480021" lastFinishedPulling="2025-11-25 21:50:34.24355524 +0000 UTC m=+1189.706031562" observedRunningTime="2025-11-25 21:50:35.01040278 +0000 UTC m=+1190.472879102" watchObservedRunningTime="2025-11-25 21:50:35.02057939 +0000 UTC m=+1190.483055712" Nov 25 21:50:35 crc kubenswrapper[4910]: I1125 21:50:35.020857 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": EOF" Nov 25 21:50:35 crc kubenswrapper[4910]: I1125 21:50:35.021033 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.202:8774/\": EOF" Nov 25 21:50:35 crc kubenswrapper[4910]: I1125 21:50:35.960804 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.011307 4910 generic.go:334] "Generic (PLEG): container finished" podID="b0a023f7-9af9-4c06-9838-0923ce4cf5a1" containerID="f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77" exitCode=0 Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.011373 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b0a023f7-9af9-4c06-9838-0923ce4cf5a1","Type":"ContainerDied","Data":"f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77"} Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.011405 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"b0a023f7-9af9-4c06-9838-0923ce4cf5a1","Type":"ContainerDied","Data":"9077b39b63ae50336e97bd082c5c512809325329316c32dc3a1402cc523b304b"} Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.011424 4910 scope.go:117] "RemoveContainer" containerID="f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.011612 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.015114 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-combined-ca-bundle\") pod \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.015290 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6v6t\" (UniqueName: \"kubernetes.io/projected/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-kube-api-access-g6v6t\") pod \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.015366 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-config-data\") pod \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\" (UID: \"b0a023f7-9af9-4c06-9838-0923ce4cf5a1\") " Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.019809 4910 generic.go:334] "Generic (PLEG): container finished" podID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerID="51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf" exitCode=143 Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.019898 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bede3e63-0748-4865-a2b0-61fddcbd3291","Type":"ContainerDied","Data":"51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf"} Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.038337 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-kube-api-access-g6v6t" (OuterVolumeSpecName: "kube-api-access-g6v6t") pod "b0a023f7-9af9-4c06-9838-0923ce4cf5a1" (UID: "b0a023f7-9af9-4c06-9838-0923ce4cf5a1"). InnerVolumeSpecName "kube-api-access-g6v6t". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.062842 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-config-data" (OuterVolumeSpecName: "config-data") pod "b0a023f7-9af9-4c06-9838-0923ce4cf5a1" (UID: "b0a023f7-9af9-4c06-9838-0923ce4cf5a1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.069948 4910 scope.go:117] "RemoveContainer" containerID="f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77" Nov 25 21:50:36 crc kubenswrapper[4910]: E1125 21:50:36.070511 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77\": container with ID starting with f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77 not found: ID does not exist" containerID="f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.070625 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77"} err="failed to get container status \"f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77\": rpc error: code = NotFound desc = could not find container \"f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77\": container with ID starting with f84fc7ec83e2cad6fad9a1d532ec9ff1cca8cb47873fef011834ed27149fdd77 not found: ID does not exist" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.083906 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b0a023f7-9af9-4c06-9838-0923ce4cf5a1" (UID: "b0a023f7-9af9-4c06-9838-0923ce4cf5a1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.120641 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6v6t\" (UniqueName: \"kubernetes.io/projected/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-kube-api-access-g6v6t\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.120994 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.121009 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b0a023f7-9af9-4c06-9838-0923ce4cf5a1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.362093 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.376322 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.384149 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:50:36 crc kubenswrapper[4910]: E1125 21:50:36.384747 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd9248e6-abed-48da-948e-3cf59171c0e7" containerName="nova-manage" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.384769 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd9248e6-abed-48da-948e-3cf59171c0e7" containerName="nova-manage" Nov 25 21:50:36 crc kubenswrapper[4910]: E1125 21:50:36.384783 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0a023f7-9af9-4c06-9838-0923ce4cf5a1" containerName="nova-scheduler-scheduler" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.384790 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0a023f7-9af9-4c06-9838-0923ce4cf5a1" containerName="nova-scheduler-scheduler" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.385008 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0a023f7-9af9-4c06-9838-0923ce4cf5a1" containerName="nova-scheduler-scheduler" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.385025 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd9248e6-abed-48da-948e-3cf59171c0e7" containerName="nova-manage" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.385889 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.389098 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.394940 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.434001 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dba0933-64a2-4286-baee-149ebff5c09d-config-data\") pod \"nova-scheduler-0\" (UID: \"8dba0933-64a2-4286-baee-149ebff5c09d\") " pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.434076 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dba0933-64a2-4286-baee-149ebff5c09d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8dba0933-64a2-4286-baee-149ebff5c09d\") " pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.434479 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9wmr\" (UniqueName: \"kubernetes.io/projected/8dba0933-64a2-4286-baee-149ebff5c09d-kube-api-access-q9wmr\") pod \"nova-scheduler-0\" (UID: \"8dba0933-64a2-4286-baee-149ebff5c09d\") " pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.537096 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dba0933-64a2-4286-baee-149ebff5c09d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8dba0933-64a2-4286-baee-149ebff5c09d\") " pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.537163 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9wmr\" (UniqueName: \"kubernetes.io/projected/8dba0933-64a2-4286-baee-149ebff5c09d-kube-api-access-q9wmr\") pod \"nova-scheduler-0\" (UID: \"8dba0933-64a2-4286-baee-149ebff5c09d\") " pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.537348 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dba0933-64a2-4286-baee-149ebff5c09d-config-data\") pod \"nova-scheduler-0\" (UID: \"8dba0933-64a2-4286-baee-149ebff5c09d\") " pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.541524 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dba0933-64a2-4286-baee-149ebff5c09d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"8dba0933-64a2-4286-baee-149ebff5c09d\") " pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.544473 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dba0933-64a2-4286-baee-149ebff5c09d-config-data\") pod \"nova-scheduler-0\" (UID: \"8dba0933-64a2-4286-baee-149ebff5c09d\") " pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.554628 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9wmr\" (UniqueName: 
\"kubernetes.io/projected/8dba0933-64a2-4286-baee-149ebff5c09d-kube-api-access-q9wmr\") pod \"nova-scheduler-0\" (UID: \"8dba0933-64a2-4286-baee-149ebff5c09d\") " pod="openstack/nova-scheduler-0" Nov 25 21:50:36 crc kubenswrapper[4910]: I1125 21:50:36.734919 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.198795 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.224828 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0a023f7-9af9-4c06-9838-0923ce4cf5a1" path="/var/lib/kubelet/pods/b0a023f7-9af9-4c06-9838-0923ce4cf5a1/volumes" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.752694 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.766341 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-nova-metadata-tls-certs\") pod \"c3861cb8-f400-4715-88c0-96371e6f09f9\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.766788 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3861cb8-f400-4715-88c0-96371e6f09f9-logs\") pod \"c3861cb8-f400-4715-88c0-96371e6f09f9\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.766866 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-combined-ca-bundle\") pod \"c3861cb8-f400-4715-88c0-96371e6f09f9\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.766944 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xw2js\" (UniqueName: \"kubernetes.io/projected/c3861cb8-f400-4715-88c0-96371e6f09f9-kube-api-access-xw2js\") pod \"c3861cb8-f400-4715-88c0-96371e6f09f9\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.766985 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-config-data\") pod \"c3861cb8-f400-4715-88c0-96371e6f09f9\" (UID: \"c3861cb8-f400-4715-88c0-96371e6f09f9\") " Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.768489 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3861cb8-f400-4715-88c0-96371e6f09f9-logs" (OuterVolumeSpecName: "logs") pod "c3861cb8-f400-4715-88c0-96371e6f09f9" (UID: "c3861cb8-f400-4715-88c0-96371e6f09f9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.772011 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3861cb8-f400-4715-88c0-96371e6f09f9-kube-api-access-xw2js" (OuterVolumeSpecName: "kube-api-access-xw2js") pod "c3861cb8-f400-4715-88c0-96371e6f09f9" (UID: "c3861cb8-f400-4715-88c0-96371e6f09f9"). InnerVolumeSpecName "kube-api-access-xw2js". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.809990 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-config-data" (OuterVolumeSpecName: "config-data") pod "c3861cb8-f400-4715-88c0-96371e6f09f9" (UID: "c3861cb8-f400-4715-88c0-96371e6f09f9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.820996 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3861cb8-f400-4715-88c0-96371e6f09f9" (UID: "c3861cb8-f400-4715-88c0-96371e6f09f9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.847702 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "c3861cb8-f400-4715-88c0-96371e6f09f9" (UID: "c3861cb8-f400-4715-88c0-96371e6f09f9"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.869620 4910 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.869655 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3861cb8-f400-4715-88c0-96371e6f09f9-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.869665 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.869675 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xw2js\" (UniqueName: \"kubernetes.io/projected/c3861cb8-f400-4715-88c0-96371e6f09f9-kube-api-access-xw2js\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:37 crc kubenswrapper[4910]: I1125 21:50:37.869684 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3861cb8-f400-4715-88c0-96371e6f09f9-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.046285 4910 generic.go:334] "Generic (PLEG): container finished" podID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerID="29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e" exitCode=0 Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.046370 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c3861cb8-f400-4715-88c0-96371e6f09f9","Type":"ContainerDied","Data":"29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e"} Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.046432 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"c3861cb8-f400-4715-88c0-96371e6f09f9","Type":"ContainerDied","Data":"0af0714eddcc81e4dad636cc7d4d7fa7d4686ebbe98c8b18590959de916d92b0"} Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.046451 4910 scope.go:117] "RemoveContainer" containerID="29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.046567 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.049929 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8dba0933-64a2-4286-baee-149ebff5c09d","Type":"ContainerStarted","Data":"f698f7ae60b4d9503235117ff7d7aa8281de1e756bcf40f46dded3ae1081bb2c"} Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.049971 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"8dba0933-64a2-4286-baee-149ebff5c09d","Type":"ContainerStarted","Data":"1c76b8f6d726de55140ce2716fa50d7c44d15a129267549803bf0fc613386948"} Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.085938 4910 scope.go:117] "RemoveContainer" containerID="b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.089136 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.089111709 podStartE2EDuration="2.089111709s" podCreationTimestamp="2025-11-25 21:50:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:50:38.069716144 +0000 UTC m=+1193.532192466" watchObservedRunningTime="2025-11-25 21:50:38.089111709 +0000 UTC m=+1193.551588031" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.115466 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.115479 4910 scope.go:117] "RemoveContainer" containerID="29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e" Nov 25 21:50:38 crc kubenswrapper[4910]: E1125 21:50:38.125586 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e\": container with ID starting with 29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e not found: ID does not exist" containerID="29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.125765 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e"} err="failed to get container status \"29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e\": rpc error: code = NotFound desc = could not find container \"29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e\": container with ID starting with 29aae7a7b6e935fc6d6afd62a40f9a7a9b0c951ab4211932740f3305c097725e not found: ID does not exist" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.125802 4910 scope.go:117] "RemoveContainer" containerID="b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7" Nov 25 21:50:38 crc kubenswrapper[4910]: E1125 21:50:38.126366 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7\": container with ID starting with b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7 not found: ID does not exist" containerID="b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.126416 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7"} err="failed to get container status \"b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7\": rpc error: code = NotFound desc = could not find container \"b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7\": container with ID starting with b657f1764c39782bb91df30bdbbcf90c759001179d59213518d0cfb401b2ada7 not found: ID does not exist" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.139839 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.154841 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:50:38 crc kubenswrapper[4910]: E1125 21:50:38.155369 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerName="nova-metadata-metadata" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.155388 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerName="nova-metadata-metadata" Nov 25 21:50:38 crc kubenswrapper[4910]: E1125 21:50:38.155434 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerName="nova-metadata-log" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.155441 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerName="nova-metadata-log" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.155614 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerName="nova-metadata-metadata" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.155641 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3861cb8-f400-4715-88c0-96371e6f09f9" containerName="nova-metadata-log" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.156797 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.158987 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.159495 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.165431 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.174464 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b80da77-dd73-4886-bcd1-88fb1c484af1-config-data\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.174542 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b80da77-dd73-4886-bcd1-88fb1c484af1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.174621 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b80da77-dd73-4886-bcd1-88fb1c484af1-logs\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.174719 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b80da77-dd73-4886-bcd1-88fb1c484af1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.174737 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s42fk\" (UniqueName: \"kubernetes.io/projected/2b80da77-dd73-4886-bcd1-88fb1c484af1-kube-api-access-s42fk\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.276412 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b80da77-dd73-4886-bcd1-88fb1c484af1-config-data\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.276483 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b80da77-dd73-4886-bcd1-88fb1c484af1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.276602 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b80da77-dd73-4886-bcd1-88fb1c484af1-logs\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 
21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.277398 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2b80da77-dd73-4886-bcd1-88fb1c484af1-logs\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.278590 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b80da77-dd73-4886-bcd1-88fb1c484af1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.278621 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s42fk\" (UniqueName: \"kubernetes.io/projected/2b80da77-dd73-4886-bcd1-88fb1c484af1-kube-api-access-s42fk\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.286444 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2b80da77-dd73-4886-bcd1-88fb1c484af1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.286723 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2b80da77-dd73-4886-bcd1-88fb1c484af1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.286764 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2b80da77-dd73-4886-bcd1-88fb1c484af1-config-data\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.295190 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s42fk\" (UniqueName: \"kubernetes.io/projected/2b80da77-dd73-4886-bcd1-88fb1c484af1-kube-api-access-s42fk\") pod \"nova-metadata-0\" (UID: \"2b80da77-dd73-4886-bcd1-88fb1c484af1\") " pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.476548 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 25 21:50:38 crc kubenswrapper[4910]: I1125 21:50:38.963400 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 25 21:50:39 crc kubenswrapper[4910]: I1125 21:50:39.060442 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b80da77-dd73-4886-bcd1-88fb1c484af1","Type":"ContainerStarted","Data":"1aff3426df3bcc308ef3787b07fdfc911605526dcdf9a0a2442ba3e83fb24dc6"} Nov 25 21:50:39 crc kubenswrapper[4910]: I1125 21:50:39.240018 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3861cb8-f400-4715-88c0-96371e6f09f9" path="/var/lib/kubelet/pods/c3861cb8-f400-4715-88c0-96371e6f09f9/volumes" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.075579 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b80da77-dd73-4886-bcd1-88fb1c484af1","Type":"ContainerStarted","Data":"a6b40ea41c89d3610740551cf5cde44c055546e0ea53c4381ac4ec6b645984c5"} Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.075843 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2b80da77-dd73-4886-bcd1-88fb1c484af1","Type":"ContainerStarted","Data":"9efc890ec938177adf99a11224cee6956dff5f81a457980db5ac92c4fc0b5da2"} Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.121880 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.121855002 podStartE2EDuration="2.121855002s" podCreationTimestamp="2025-11-25 21:50:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:50:40.110672535 +0000 UTC m=+1195.573148857" watchObservedRunningTime="2025-11-25 21:50:40.121855002 +0000 UTC m=+1195.584331324" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.824866 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.853130 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-internal-tls-certs\") pod \"bede3e63-0748-4865-a2b0-61fddcbd3291\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.853277 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bede3e63-0748-4865-a2b0-61fddcbd3291-logs\") pod \"bede3e63-0748-4865-a2b0-61fddcbd3291\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.853374 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-public-tls-certs\") pod \"bede3e63-0748-4865-a2b0-61fddcbd3291\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.853421 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-config-data\") pod \"bede3e63-0748-4865-a2b0-61fddcbd3291\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.853492 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-combined-ca-bundle\") pod \"bede3e63-0748-4865-a2b0-61fddcbd3291\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.853547 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mtx8\" (UniqueName: \"kubernetes.io/projected/bede3e63-0748-4865-a2b0-61fddcbd3291-kube-api-access-2mtx8\") pod \"bede3e63-0748-4865-a2b0-61fddcbd3291\" (UID: \"bede3e63-0748-4865-a2b0-61fddcbd3291\") " Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.863019 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bede3e63-0748-4865-a2b0-61fddcbd3291-logs" (OuterVolumeSpecName: "logs") pod "bede3e63-0748-4865-a2b0-61fddcbd3291" (UID: "bede3e63-0748-4865-a2b0-61fddcbd3291"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.863527 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bede3e63-0748-4865-a2b0-61fddcbd3291-kube-api-access-2mtx8" (OuterVolumeSpecName: "kube-api-access-2mtx8") pod "bede3e63-0748-4865-a2b0-61fddcbd3291" (UID: "bede3e63-0748-4865-a2b0-61fddcbd3291"). InnerVolumeSpecName "kube-api-access-2mtx8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.895517 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-config-data" (OuterVolumeSpecName: "config-data") pod "bede3e63-0748-4865-a2b0-61fddcbd3291" (UID: "bede3e63-0748-4865-a2b0-61fddcbd3291"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.906370 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bede3e63-0748-4865-a2b0-61fddcbd3291" (UID: "bede3e63-0748-4865-a2b0-61fddcbd3291"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.922122 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "bede3e63-0748-4865-a2b0-61fddcbd3291" (UID: "bede3e63-0748-4865-a2b0-61fddcbd3291"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.956277 4910 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bede3e63-0748-4865-a2b0-61fddcbd3291-logs\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.956306 4910 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.956317 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.956328 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.956336 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mtx8\" (UniqueName: \"kubernetes.io/projected/bede3e63-0748-4865-a2b0-61fddcbd3291-kube-api-access-2mtx8\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:40 crc kubenswrapper[4910]: I1125 21:50:40.964848 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "bede3e63-0748-4865-a2b0-61fddcbd3291" (UID: "bede3e63-0748-4865-a2b0-61fddcbd3291"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.058922 4910 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bede3e63-0748-4865-a2b0-61fddcbd3291-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.089862 4910 generic.go:334] "Generic (PLEG): container finished" podID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerID="5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0" exitCode=0 Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.091180 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.093392 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bede3e63-0748-4865-a2b0-61fddcbd3291","Type":"ContainerDied","Data":"5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0"} Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.093471 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"bede3e63-0748-4865-a2b0-61fddcbd3291","Type":"ContainerDied","Data":"f7753d45b045a23e21a7c03156262a5382f7320f8ff51d770497cee45c0d0d72"} Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.093504 4910 scope.go:117] "RemoveContainer" containerID="5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.116701 4910 scope.go:117] "RemoveContainer" containerID="51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.130621 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.149503 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.158434 4910 scope.go:117] "RemoveContainer" containerID="5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.158650 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:41 crc kubenswrapper[4910]: E1125 21:50:41.159052 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerName="nova-api-log" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.159070 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerName="nova-api-log" Nov 25 21:50:41 crc kubenswrapper[4910]: E1125 21:50:41.159093 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerName="nova-api-api" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.159099 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerName="nova-api-api" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.159619 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerName="nova-api-api" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.159649 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="bede3e63-0748-4865-a2b0-61fddcbd3291" containerName="nova-api-log" Nov 25 21:50:41 crc kubenswrapper[4910]: E1125 21:50:41.162045 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0\": container with ID starting with 5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0 not found: ID does not exist" containerID="5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.162097 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0"} err="failed to get container status 
\"5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0\": rpc error: code = NotFound desc = could not find container \"5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0\": container with ID starting with 5b5d1584a6847399b1bb919cadf44011e5cdf957893cfa1866367663452414b0 not found: ID does not exist" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.162132 4910 scope.go:117] "RemoveContainer" containerID="51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.162471 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: E1125 21:50:41.162912 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf\": container with ID starting with 51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf not found: ID does not exist" containerID="51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.162978 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf"} err="failed to get container status \"51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf\": rpc error: code = NotFound desc = could not find container \"51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf\": container with ID starting with 51ee8f18fa412b576b90d5031135e777a98a8be919529593a5aecb720cc5facf not found: ID does not exist" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.164631 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.165101 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.170836 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.195495 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.218190 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bede3e63-0748-4865-a2b0-61fddcbd3291" path="/var/lib/kubelet/pods/bede3e63-0748-4865-a2b0-61fddcbd3291/volumes" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.264845 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.265332 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-public-tls-certs\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.265506 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.265937 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-config-data\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.266072 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knl66\" (UniqueName: \"kubernetes.io/projected/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-kube-api-access-knl66\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.266296 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-logs\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.369008 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-public-tls-certs\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.369090 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.369178 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-config-data\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.369232 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knl66\" (UniqueName: \"kubernetes.io/projected/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-kube-api-access-knl66\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.369320 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-logs\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.369520 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.370083 4910 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-logs\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.374096 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-public-tls-certs\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.374211 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.374962 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-config-data\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.375538 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-internal-tls-certs\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.387307 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knl66\" (UniqueName: \"kubernetes.io/projected/caf92f8f-f8b6-4214-8b76-13cfe6bafd4a-kube-api-access-knl66\") pod \"nova-api-0\" (UID: \"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a\") " pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.499029 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.738647 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 25 21:50:41 crc kubenswrapper[4910]: W1125 21:50:41.969970 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcaf92f8f_f8b6_4214_8b76_13cfe6bafd4a.slice/crio-40b9f09115ddc8095c33a6d25516f33652f8c840c20ee75c000b56ca8f2fd9d8 WatchSource:0}: Error finding container 40b9f09115ddc8095c33a6d25516f33652f8c840c20ee75c000b56ca8f2fd9d8: Status 404 returned error can't find the container with id 40b9f09115ddc8095c33a6d25516f33652f8c840c20ee75c000b56ca8f2fd9d8 Nov 25 21:50:41 crc kubenswrapper[4910]: I1125 21:50:41.970544 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 25 21:50:42 crc kubenswrapper[4910]: I1125 21:50:42.099570 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a","Type":"ContainerStarted","Data":"40b9f09115ddc8095c33a6d25516f33652f8c840c20ee75c000b56ca8f2fd9d8"} Nov 25 21:50:43 crc kubenswrapper[4910]: I1125 21:50:43.116207 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a","Type":"ContainerStarted","Data":"256f9dd3be4e29cc52a08338b6b9c64ecfe19adab3f6991e06d1d737c62ab7d5"} Nov 25 21:50:43 crc kubenswrapper[4910]: I1125 21:50:43.116292 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"caf92f8f-f8b6-4214-8b76-13cfe6bafd4a","Type":"ContainerStarted","Data":"932c403599e1206fae634d849fb54bb413e2ff747dfb9f1bdfb0da5fa19aa7b7"} Nov 25 21:50:43 crc kubenswrapper[4910]: I1125 21:50:43.150458 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.150433791 podStartE2EDuration="2.150433791s" podCreationTimestamp="2025-11-25 21:50:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:50:43.143273211 +0000 UTC m=+1198.605749543" watchObservedRunningTime="2025-11-25 21:50:43.150433791 +0000 UTC m=+1198.612910123" Nov 25 21:50:43 crc kubenswrapper[4910]: I1125 21:50:43.477047 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 21:50:43 crc kubenswrapper[4910]: I1125 21:50:43.477728 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 25 21:50:46 crc kubenswrapper[4910]: I1125 21:50:46.736378 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 25 21:50:46 crc kubenswrapper[4910]: I1125 21:50:46.767035 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 25 21:50:47 crc kubenswrapper[4910]: I1125 21:50:47.187733 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 25 21:50:48 crc kubenswrapper[4910]: I1125 21:50:48.478151 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 21:50:48 crc kubenswrapper[4910]: I1125 21:50:48.478878 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 25 21:50:49 crc 
kubenswrapper[4910]: I1125 21:50:49.498487 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="2b80da77-dd73-4886-bcd1-88fb1c484af1" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.206:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 25 21:50:49 crc kubenswrapper[4910]: I1125 21:50:49.498508 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="2b80da77-dd73-4886-bcd1-88fb1c484af1" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.206:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 21:50:51 crc kubenswrapper[4910]: I1125 21:50:51.499671 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 21:50:51 crc kubenswrapper[4910]: I1125 21:50:51.500097 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 25 21:50:52 crc kubenswrapper[4910]: I1125 21:50:52.512462 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="caf92f8f-f8b6-4214-8b76-13cfe6bafd4a" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.207:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 21:50:52 crc kubenswrapper[4910]: I1125 21:50:52.512507 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="caf92f8f-f8b6-4214-8b76-13cfe6bafd4a" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.207:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 25 21:50:58 crc kubenswrapper[4910]: I1125 21:50:58.483804 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 21:50:58 crc kubenswrapper[4910]: I1125 21:50:58.484551 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 25 21:50:58 crc kubenswrapper[4910]: I1125 21:50:58.489921 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 21:50:58 crc kubenswrapper[4910]: I1125 21:50:58.497111 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 25 21:51:00 crc kubenswrapper[4910]: I1125 21:51:00.631137 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 25 21:51:01 crc kubenswrapper[4910]: I1125 21:51:01.510448 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 21:51:01 crc kubenswrapper[4910]: I1125 21:51:01.511065 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 21:51:01 crc kubenswrapper[4910]: I1125 21:51:01.519618 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 25 21:51:01 crc kubenswrapper[4910]: I1125 21:51:01.521672 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 21:51:02 crc kubenswrapper[4910]: I1125 21:51:02.334331 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 25 21:51:02 crc kubenswrapper[4910]: I1125 21:51:02.341797 4910 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 25 21:51:10 crc kubenswrapper[4910]: I1125 21:51:10.686384 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 21:51:11 crc kubenswrapper[4910]: I1125 21:51:11.843118 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 21:51:15 crc kubenswrapper[4910]: I1125 21:51:15.086361 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="58eca84e-dfac-4af7-ad45-241a776f81d6" containerName="rabbitmq" containerID="cri-o://a878a11275d059b3812b533330998964e76b42f5acaf365e2ee090734c1fa3f4" gracePeriod=604796 Nov 25 21:51:16 crc kubenswrapper[4910]: I1125 21:51:16.200899 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" containerName="rabbitmq" containerID="cri-o://b825c8d5cffc9bffbd7bed750a3dc1621251f7c189ded2dc33ebc862bd5e61e2" gracePeriod=604796 Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.553601 4910 generic.go:334] "Generic (PLEG): container finished" podID="58eca84e-dfac-4af7-ad45-241a776f81d6" containerID="a878a11275d059b3812b533330998964e76b42f5acaf365e2ee090734c1fa3f4" exitCode=0 Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.553688 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58eca84e-dfac-4af7-ad45-241a776f81d6","Type":"ContainerDied","Data":"a878a11275d059b3812b533330998964e76b42f5acaf365e2ee090734c1fa3f4"} Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.700767 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.800377 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/58eca84e-dfac-4af7-ad45-241a776f81d6-erlang-cookie-secret\") pod \"58eca84e-dfac-4af7-ad45-241a776f81d6\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.800507 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/58eca84e-dfac-4af7-ad45-241a776f81d6-pod-info\") pod \"58eca84e-dfac-4af7-ad45-241a776f81d6\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.800538 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-plugins-conf\") pod \"58eca84e-dfac-4af7-ad45-241a776f81d6\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.800582 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-plugins\") pod \"58eca84e-dfac-4af7-ad45-241a776f81d6\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.800609 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-confd\") pod \"58eca84e-dfac-4af7-ad45-241a776f81d6\" (UID: 
\"58eca84e-dfac-4af7-ad45-241a776f81d6\") " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.800666 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-erlang-cookie\") pod \"58eca84e-dfac-4af7-ad45-241a776f81d6\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.800722 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"58eca84e-dfac-4af7-ad45-241a776f81d6\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.800746 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hlcp4\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-kube-api-access-hlcp4\") pod \"58eca84e-dfac-4af7-ad45-241a776f81d6\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.800788 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-server-conf\") pod \"58eca84e-dfac-4af7-ad45-241a776f81d6\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.800824 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-tls\") pod \"58eca84e-dfac-4af7-ad45-241a776f81d6\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.800876 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-config-data\") pod \"58eca84e-dfac-4af7-ad45-241a776f81d6\" (UID: \"58eca84e-dfac-4af7-ad45-241a776f81d6\") " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.801239 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "58eca84e-dfac-4af7-ad45-241a776f81d6" (UID: "58eca84e-dfac-4af7-ad45-241a776f81d6"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.801450 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "58eca84e-dfac-4af7-ad45-241a776f81d6" (UID: "58eca84e-dfac-4af7-ad45-241a776f81d6"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.801715 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "58eca84e-dfac-4af7-ad45-241a776f81d6" (UID: "58eca84e-dfac-4af7-ad45-241a776f81d6"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.817594 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "persistence") pod "58eca84e-dfac-4af7-ad45-241a776f81d6" (UID: "58eca84e-dfac-4af7-ad45-241a776f81d6"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.817584 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/58eca84e-dfac-4af7-ad45-241a776f81d6-pod-info" (OuterVolumeSpecName: "pod-info") pod "58eca84e-dfac-4af7-ad45-241a776f81d6" (UID: "58eca84e-dfac-4af7-ad45-241a776f81d6"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.817607 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/58eca84e-dfac-4af7-ad45-241a776f81d6-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "58eca84e-dfac-4af7-ad45-241a776f81d6" (UID: "58eca84e-dfac-4af7-ad45-241a776f81d6"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.817726 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "58eca84e-dfac-4af7-ad45-241a776f81d6" (UID: "58eca84e-dfac-4af7-ad45-241a776f81d6"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.817756 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-kube-api-access-hlcp4" (OuterVolumeSpecName: "kube-api-access-hlcp4") pod "58eca84e-dfac-4af7-ad45-241a776f81d6" (UID: "58eca84e-dfac-4af7-ad45-241a776f81d6"). InnerVolumeSpecName "kube-api-access-hlcp4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.845926 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-config-data" (OuterVolumeSpecName: "config-data") pod "58eca84e-dfac-4af7-ad45-241a776f81d6" (UID: "58eca84e-dfac-4af7-ad45-241a776f81d6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.890606 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-server-conf" (OuterVolumeSpecName: "server-conf") pod "58eca84e-dfac-4af7-ad45-241a776f81d6" (UID: "58eca84e-dfac-4af7-ad45-241a776f81d6"). InnerVolumeSpecName "server-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.892926 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.904661 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.904703 4910 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/58eca84e-dfac-4af7-ad45-241a776f81d6-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.904714 4910 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/58eca84e-dfac-4af7-ad45-241a776f81d6-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.904722 4910 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.904730 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.904739 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.904769 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.904779 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hlcp4\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-kube-api-access-hlcp4\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.904788 4910 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/58eca84e-dfac-4af7-ad45-241a776f81d6-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.904798 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.932397 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 25 21:51:21 crc kubenswrapper[4910]: I1125 21:51:21.953233 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod 
"58eca84e-dfac-4af7-ad45-241a776f81d6" (UID: "58eca84e-dfac-4af7-ad45-241a776f81d6"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.007269 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/58eca84e-dfac-4af7-ad45-241a776f81d6-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.007309 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.568666 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"58eca84e-dfac-4af7-ad45-241a776f81d6","Type":"ContainerDied","Data":"d4b506e7ae1191bfc6b0dedb055051fb3ed564e5d6e4379bd1616ddcc08fb9af"} Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.569009 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.569731 4910 scope.go:117] "RemoveContainer" containerID="a878a11275d059b3812b533330998964e76b42f5acaf365e2ee090734c1fa3f4" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.572495 4910 generic.go:334] "Generic (PLEG): container finished" podID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" containerID="b825c8d5cffc9bffbd7bed750a3dc1621251f7c189ded2dc33ebc862bd5e61e2" exitCode=0 Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.572545 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9b20e3e8-ac28-471d-82ed-e619a78a7c55","Type":"ContainerDied","Data":"b825c8d5cffc9bffbd7bed750a3dc1621251f7c189ded2dc33ebc862bd5e61e2"} Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.624337 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.674362 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.675099 4910 scope.go:117] "RemoveContainer" containerID="3a5caa60ba704cb56a0ba9be7c1f393181c0267978e6e6d9500155cc7e70b7c2" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.729095 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 21:51:22 crc kubenswrapper[4910]: E1125 21:51:22.730016 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58eca84e-dfac-4af7-ad45-241a776f81d6" containerName="setup-container" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.730032 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="58eca84e-dfac-4af7-ad45-241a776f81d6" containerName="setup-container" Nov 25 21:51:22 crc kubenswrapper[4910]: E1125 21:51:22.730057 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="58eca84e-dfac-4af7-ad45-241a776f81d6" containerName="rabbitmq" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.730065 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="58eca84e-dfac-4af7-ad45-241a776f81d6" containerName="rabbitmq" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.730271 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="58eca84e-dfac-4af7-ad45-241a776f81d6" containerName="rabbitmq" Nov 25 21:51:22 crc 
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.731423 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.734502 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.734684 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-76g8c"
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.734767 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.735024 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.736486 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.736699 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.736883 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.752346 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.824562 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kf4tg"]
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.826386 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg"
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.829783 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam"
Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.830082 4910 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.843235 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kf4tg"] Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.861168 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.861236 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.861279 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.861323 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.861354 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.861691 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.861908 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-config-data\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.861998 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.862053 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nmmc9\" (UniqueName: 
\"kubernetes.io/projected/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-kube-api-access-nmmc9\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.862078 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.862145 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.963565 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-plugins\") pod \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.963616 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8w9b8\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-kube-api-access-8w9b8\") pod \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.963657 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-erlang-cookie\") pod \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.963770 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-tls\") pod \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.963843 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9b20e3e8-ac28-471d-82ed-e619a78a7c55-erlang-cookie-secret\") pod \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.963884 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.963917 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-plugins-conf\") pod \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964009 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9b20e3e8-ac28-471d-82ed-e619a78a7c55-pod-info\") pod \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964027 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-config-data\") pod \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964046 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-server-conf\") pod \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964080 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-confd\") pod \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\" (UID: \"9b20e3e8-ac28-471d-82ed-e619a78a7c55\") " Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964145 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "9b20e3e8-ac28-471d-82ed-e619a78a7c55" (UID: "9b20e3e8-ac28-471d-82ed-e619a78a7c55"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964317 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9g9w\" (UniqueName: \"kubernetes.io/projected/960cc043-e25c-4a45-8272-c92d70a8b6f7-kube-api-access-m9g9w\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964363 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964391 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964418 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964438 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-svc\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: 
\"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964459 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964489 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964518 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-swift-storage-0\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964548 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-config\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964569 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964605 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-openstack-edpm-ipam\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964654 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-config-data\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964694 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-nb\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964729 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " 
pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964764 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nmmc9\" (UniqueName: \"kubernetes.io/projected/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-kube-api-access-nmmc9\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964788 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-sb\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964813 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964854 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.964937 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.965878 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.966964 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "9b20e3e8-ac28-471d-82ed-e619a78a7c55" (UID: "9b20e3e8-ac28-471d-82ed-e619a78a7c55"). InnerVolumeSpecName "plugins-conf". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.967017 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.967082 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.968382 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-server-conf\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.969782 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-kube-api-access-8w9b8" (OuterVolumeSpecName: "kube-api-access-8w9b8") pod "9b20e3e8-ac28-471d-82ed-e619a78a7c55" (UID: "9b20e3e8-ac28-471d-82ed-e619a78a7c55"). InnerVolumeSpecName "kube-api-access-8w9b8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.973469 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-config-data\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.973586 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.973994 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "9b20e3e8-ac28-471d-82ed-e619a78a7c55" (UID: "9b20e3e8-ac28-471d-82ed-e619a78a7c55"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.978259 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "9b20e3e8-ac28-471d-82ed-e619a78a7c55" (UID: "9b20e3e8-ac28-471d-82ed-e619a78a7c55"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.978671 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-pod-info\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.981205 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "9b20e3e8-ac28-471d-82ed-e619a78a7c55" (UID: "9b20e3e8-ac28-471d-82ed-e619a78a7c55"). InnerVolumeSpecName "local-storage01-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.981348 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/9b20e3e8-ac28-471d-82ed-e619a78a7c55-pod-info" (OuterVolumeSpecName: "pod-info") pod "9b20e3e8-ac28-471d-82ed-e619a78a7c55" (UID: "9b20e3e8-ac28-471d-82ed-e619a78a7c55"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.981568 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b20e3e8-ac28-471d-82ed-e619a78a7c55-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "9b20e3e8-ac28-471d-82ed-e619a78a7c55" (UID: "9b20e3e8-ac28-471d-82ed-e619a78a7c55"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.992329 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.992580 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:22 crc kubenswrapper[4910]: I1125 21:51:22.994578 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.013335 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nmmc9\" (UniqueName: \"kubernetes.io/projected/9d06ec4c-6e1e-4fc9-9e41-59857b4494fd-kube-api-access-nmmc9\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.051072 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-config-data" (OuterVolumeSpecName: "config-data") pod "9b20e3e8-ac28-471d-82ed-e619a78a7c55" (UID: "9b20e3e8-ac28-471d-82ed-e619a78a7c55"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.054138 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd\") " pod="openstack/rabbitmq-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.067394 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-config\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068179 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-config\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068322 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-openstack-edpm-ipam\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068399 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-nb\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068454 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-sb\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068529 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9g9w\" (UniqueName: \"kubernetes.io/projected/960cc043-e25c-4a45-8272-c92d70a8b6f7-kube-api-access-m9g9w\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068564 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-svc\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068630 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-swift-storage-0\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068695 4910 
reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068706 4910 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/9b20e3e8-ac28-471d-82ed-e619a78a7c55-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068725 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068735 4910 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068745 4910 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/9b20e3e8-ac28-471d-82ed-e619a78a7c55-pod-info\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068770 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068779 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8w9b8\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-kube-api-access-8w9b8\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.068789 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.070962 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-svc\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.071205 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-sb\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.071337 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-swift-storage-0\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.071911 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-openstack-edpm-ipam\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: 
\"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.071991 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-nb\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.072938 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-server-conf" (OuterVolumeSpecName: "server-conf") pod "9b20e3e8-ac28-471d-82ed-e619a78a7c55" (UID: "9b20e3e8-ac28-471d-82ed-e619a78a7c55"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.089089 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9g9w\" (UniqueName: \"kubernetes.io/projected/960cc043-e25c-4a45-8272-c92d70a8b6f7-kube-api-access-m9g9w\") pod \"dnsmasq-dns-5576978c7c-kf4tg\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.095360 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.097934 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "9b20e3e8-ac28-471d-82ed-e619a78a7c55" (UID: "9b20e3e8-ac28-471d-82ed-e619a78a7c55"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.098401 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.098453 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.149724 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.164046 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.169960 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.169983 4910 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/9b20e3e8-ac28-471d-82ed-e619a78a7c55-server-conf\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.169994 4910 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/9b20e3e8-ac28-471d-82ed-e619a78a7c55-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.217273 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="58eca84e-dfac-4af7-ad45-241a776f81d6" path="/var/lib/kubelet/pods/58eca84e-dfac-4af7-ad45-241a776f81d6/volumes" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.586996 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"9b20e3e8-ac28-471d-82ed-e619a78a7c55","Type":"ContainerDied","Data":"a305b01ce7967d3173f986621972e99d0c18daa8bbc25f6f49552e46b9200cab"} Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.587063 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.587119 4910 scope.go:117] "RemoveContainer" containerID="b825c8d5cffc9bffbd7bed750a3dc1621251f7c189ded2dc33ebc862bd5e61e2" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.617282 4910 scope.go:117] "RemoveContainer" containerID="a663883b01e5d94c3a7a51f2d4075ac478de107d5f88233f99ea3c93dfe1bda9" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.620874 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.641188 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.652694 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 21:51:23 crc kubenswrapper[4910]: E1125 21:51:23.653333 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" containerName="rabbitmq" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.653354 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" containerName="rabbitmq" Nov 25 21:51:23 crc kubenswrapper[4910]: E1125 21:51:23.653408 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" containerName="setup-container" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.653416 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" containerName="setup-container" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.653654 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" containerName="rabbitmq" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.655050 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.660850 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.660872 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-7hxk7" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.661110 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.661341 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.661516 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.661733 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.661912 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.665167 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.681143 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.681357 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.681568 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.681889 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.681920 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.681963 4910 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhmnj\" (UniqueName: \"kubernetes.io/projected/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-kube-api-access-zhmnj\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.682123 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.682149 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.682176 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.682199 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.682277 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.730469 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.736940 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kf4tg"] Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.784186 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.784268 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.784294 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: 
\"kubernetes.io/empty-dir/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.784324 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhmnj\" (UniqueName: \"kubernetes.io/projected/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-kube-api-access-zhmnj\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.784381 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.784403 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.784426 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.784442 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.784473 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.784530 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.784574 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.785615 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: 
\"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.786466 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.787587 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.788588 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.790283 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.790521 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.791117 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.798978 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.799039 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.799102 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.810205 4910 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhmnj\" (UniqueName: \"kubernetes.io/projected/bc2bbda0-2d3e-4794-bc13-21bca025c6fe-kube-api-access-zhmnj\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.836998 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"bc2bbda0-2d3e-4794-bc13-21bca025c6fe\") " pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:23 crc kubenswrapper[4910]: I1125 21:51:23.993414 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:51:24 crc kubenswrapper[4910]: W1125 21:51:24.494801 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbc2bbda0_2d3e_4794_bc13_21bca025c6fe.slice/crio-7ca1a6e7479a0c588818899a4a59d73cadcec22ed1d5358ab5f3e20e122b644f WatchSource:0}: Error finding container 7ca1a6e7479a0c588818899a4a59d73cadcec22ed1d5358ab5f3e20e122b644f: Status 404 returned error can't find the container with id 7ca1a6e7479a0c588818899a4a59d73cadcec22ed1d5358ab5f3e20e122b644f Nov 25 21:51:24 crc kubenswrapper[4910]: I1125 21:51:24.495041 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 25 21:51:24 crc kubenswrapper[4910]: I1125 21:51:24.599543 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bc2bbda0-2d3e-4794-bc13-21bca025c6fe","Type":"ContainerStarted","Data":"7ca1a6e7479a0c588818899a4a59d73cadcec22ed1d5358ab5f3e20e122b644f"} Nov 25 21:51:24 crc kubenswrapper[4910]: I1125 21:51:24.600987 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd","Type":"ContainerStarted","Data":"4bd33137726e2ab0e29d66d3a23f7707c7ea31f800b7d33783e76b7dd1b3456d"} Nov 25 21:51:24 crc kubenswrapper[4910]: I1125 21:51:24.604720 4910 generic.go:334] "Generic (PLEG): container finished" podID="960cc043-e25c-4a45-8272-c92d70a8b6f7" containerID="0c2fadfefb7e4c1f3a61f5d5375f0f018ecfd4ed6cfe301efa428c6048899643" exitCode=0 Nov 25 21:51:24 crc kubenswrapper[4910]: I1125 21:51:24.604748 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" event={"ID":"960cc043-e25c-4a45-8272-c92d70a8b6f7","Type":"ContainerDied","Data":"0c2fadfefb7e4c1f3a61f5d5375f0f018ecfd4ed6cfe301efa428c6048899643"} Nov 25 21:51:24 crc kubenswrapper[4910]: I1125 21:51:24.604780 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" event={"ID":"960cc043-e25c-4a45-8272-c92d70a8b6f7","Type":"ContainerStarted","Data":"e9c73816c11a115b02c352da1a63d9cc44d5263496e0258d1dc81c11f01ed912"} Nov 25 21:51:25 crc kubenswrapper[4910]: I1125 21:51:25.221204 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b20e3e8-ac28-471d-82ed-e619a78a7c55" path="/var/lib/kubelet/pods/9b20e3e8-ac28-471d-82ed-e619a78a7c55/volumes" Nov 25 21:51:25 crc kubenswrapper[4910]: I1125 21:51:25.618253 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" 
event={"ID":"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd","Type":"ContainerStarted","Data":"f8d9ece16af3d3308e5221d9e5fe3584afdf999926ac1764284f4b6975a07cb8"} Nov 25 21:51:25 crc kubenswrapper[4910]: I1125 21:51:25.621160 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" event={"ID":"960cc043-e25c-4a45-8272-c92d70a8b6f7","Type":"ContainerStarted","Data":"943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d"} Nov 25 21:51:25 crc kubenswrapper[4910]: I1125 21:51:25.621421 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:25 crc kubenswrapper[4910]: I1125 21:51:25.678144 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" podStartSLOduration=3.678118644 podStartE2EDuration="3.678118644s" podCreationTimestamp="2025-11-25 21:51:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:51:25.670723262 +0000 UTC m=+1241.133199594" watchObservedRunningTime="2025-11-25 21:51:25.678118644 +0000 UTC m=+1241.140594966" Nov 25 21:51:26 crc kubenswrapper[4910]: I1125 21:51:26.572043 4910 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="58eca84e-dfac-4af7-ad45-241a776f81d6" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: i/o timeout" Nov 25 21:51:26 crc kubenswrapper[4910]: I1125 21:51:26.634210 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bc2bbda0-2d3e-4794-bc13-21bca025c6fe","Type":"ContainerStarted","Data":"2e47eefdc4f5d9c8ca9e32296e0a42206e3b7c33f4a0722318380512a5090ba5"} Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.166499 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.249984 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-79prd"] Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.250312 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" podUID="2707588d-f101-4cbd-a3cb-cb6366cb0231" containerName="dnsmasq-dns" containerID="cri-o://c2d23beb2a8777214bfa1bf6da8f94e3a83fd1fc95e0f3478400c006ae36c99d" gracePeriod=10 Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.396718 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8c6f6df99-9qdqp"] Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.400332 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.433495 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8c6f6df99-9qdqp"] Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.458410 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-dns-swift-storage-0\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.458523 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqmsb\" (UniqueName: \"kubernetes.io/projected/fe3aae4c-2f2b-42be-b179-105323fa0957-kube-api-access-zqmsb\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.458567 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-config\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.458605 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-dns-svc\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.458641 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-ovsdbserver-nb\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.458930 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-ovsdbserver-sb\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.459071 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-openstack-edpm-ipam\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.564039 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-ovsdbserver-sb\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.564104 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-openstack-edpm-ipam\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.564155 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-dns-swift-storage-0\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.564212 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqmsb\" (UniqueName: \"kubernetes.io/projected/fe3aae4c-2f2b-42be-b179-105323fa0957-kube-api-access-zqmsb\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.564260 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-config\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.564299 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-dns-svc\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.564336 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-ovsdbserver-nb\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.565502 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-config\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.565947 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-ovsdbserver-nb\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.566376 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-ovsdbserver-sb\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.566508 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-dns-svc\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.566880 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-dns-swift-storage-0\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.569050 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/fe3aae4c-2f2b-42be-b179-105323fa0957-openstack-edpm-ipam\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.594096 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqmsb\" (UniqueName: \"kubernetes.io/projected/fe3aae4c-2f2b-42be-b179-105323fa0957-kube-api-access-zqmsb\") pod \"dnsmasq-dns-8c6f6df99-9qdqp\" (UID: \"fe3aae4c-2f2b-42be-b179-105323fa0957\") " pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.712716 4910 generic.go:334] "Generic (PLEG): container finished" podID="2707588d-f101-4cbd-a3cb-cb6366cb0231" containerID="c2d23beb2a8777214bfa1bf6da8f94e3a83fd1fc95e0f3478400c006ae36c99d" exitCode=0 Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.712797 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" event={"ID":"2707588d-f101-4cbd-a3cb-cb6366cb0231","Type":"ContainerDied","Data":"c2d23beb2a8777214bfa1bf6da8f94e3a83fd1fc95e0f3478400c006ae36c99d"} Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.713205 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" event={"ID":"2707588d-f101-4cbd-a3cb-cb6366cb0231","Type":"ContainerDied","Data":"844fed519bce5af34dd867957570bca138590b67f07186a02eabf054232527ed"} Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.713231 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="844fed519bce5af34dd867957570bca138590b67f07186a02eabf054232527ed" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.752425 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.833930 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.976001 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-svc\") pod \"2707588d-f101-4cbd-a3cb-cb6366cb0231\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.976368 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-swift-storage-0\") pod \"2707588d-f101-4cbd-a3cb-cb6366cb0231\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.976493 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmnx7\" (UniqueName: \"kubernetes.io/projected/2707588d-f101-4cbd-a3cb-cb6366cb0231-kube-api-access-mmnx7\") pod \"2707588d-f101-4cbd-a3cb-cb6366cb0231\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.976583 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-sb\") pod \"2707588d-f101-4cbd-a3cb-cb6366cb0231\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.976691 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-config\") pod \"2707588d-f101-4cbd-a3cb-cb6366cb0231\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.976887 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-nb\") pod \"2707588d-f101-4cbd-a3cb-cb6366cb0231\" (UID: \"2707588d-f101-4cbd-a3cb-cb6366cb0231\") " Nov 25 21:51:33 crc kubenswrapper[4910]: I1125 21:51:33.986031 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2707588d-f101-4cbd-a3cb-cb6366cb0231-kube-api-access-mmnx7" (OuterVolumeSpecName: "kube-api-access-mmnx7") pod "2707588d-f101-4cbd-a3cb-cb6366cb0231" (UID: "2707588d-f101-4cbd-a3cb-cb6366cb0231"). InnerVolumeSpecName "kube-api-access-mmnx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.030759 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2707588d-f101-4cbd-a3cb-cb6366cb0231" (UID: "2707588d-f101-4cbd-a3cb-cb6366cb0231"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.030836 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2707588d-f101-4cbd-a3cb-cb6366cb0231" (UID: "2707588d-f101-4cbd-a3cb-cb6366cb0231"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.043679 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "2707588d-f101-4cbd-a3cb-cb6366cb0231" (UID: "2707588d-f101-4cbd-a3cb-cb6366cb0231"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.061207 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "2707588d-f101-4cbd-a3cb-cb6366cb0231" (UID: "2707588d-f101-4cbd-a3cb-cb6366cb0231"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.063093 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-config" (OuterVolumeSpecName: "config") pod "2707588d-f101-4cbd-a3cb-cb6366cb0231" (UID: "2707588d-f101-4cbd-a3cb-cb6366cb0231"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.079774 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.079818 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.079831 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmnx7\" (UniqueName: \"kubernetes.io/projected/2707588d-f101-4cbd-a3cb-cb6366cb0231-kube-api-access-mmnx7\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.079842 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.079854 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.079864 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2707588d-f101-4cbd-a3cb-cb6366cb0231-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.277826 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8c6f6df99-9qdqp"] Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.723961 4910 generic.go:334] "Generic (PLEG): container finished" podID="fe3aae4c-2f2b-42be-b179-105323fa0957" containerID="68d921c2ce6a44d5798f9ac06650f768ac90905a4e46253663a194ac628e358a" exitCode=0 Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.724074 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6c5df9-79prd" Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.724070 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" event={"ID":"fe3aae4c-2f2b-42be-b179-105323fa0957","Type":"ContainerDied","Data":"68d921c2ce6a44d5798f9ac06650f768ac90905a4e46253663a194ac628e358a"} Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.724144 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" event={"ID":"fe3aae4c-2f2b-42be-b179-105323fa0957","Type":"ContainerStarted","Data":"53c2c8c88d60289813ec940d5377b2003c44e8b274e52e8237bbf0d19bce5603"} Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.910623 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-79prd"] Nov 25 21:51:34 crc kubenswrapper[4910]: I1125 21:51:34.920020 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6c5df9-79prd"] Nov 25 21:51:35 crc kubenswrapper[4910]: I1125 21:51:35.218939 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2707588d-f101-4cbd-a3cb-cb6366cb0231" path="/var/lib/kubelet/pods/2707588d-f101-4cbd-a3cb-cb6366cb0231/volumes" Nov 25 21:51:35 crc kubenswrapper[4910]: I1125 21:51:35.735650 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" event={"ID":"fe3aae4c-2f2b-42be-b179-105323fa0957","Type":"ContainerStarted","Data":"f9c9617676c725d16363e4c1f13b0fb1590bed2413f2a15298dc9f56995061bc"} Nov 25 21:51:35 crc kubenswrapper[4910]: I1125 21:51:35.736269 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:35 crc kubenswrapper[4910]: I1125 21:51:35.762958 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" podStartSLOduration=2.76293784 podStartE2EDuration="2.76293784s" podCreationTimestamp="2025-11-25 21:51:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:51:35.756213314 +0000 UTC m=+1251.218689636" watchObservedRunningTime="2025-11-25 21:51:35.76293784 +0000 UTC m=+1251.225414162" Nov 25 21:51:43 crc kubenswrapper[4910]: I1125 21:51:43.754599 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8c6f6df99-9qdqp" Nov 25 21:51:43 crc kubenswrapper[4910]: I1125 21:51:43.849148 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kf4tg"] Nov 25 21:51:43 crc kubenswrapper[4910]: I1125 21:51:43.853613 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" podUID="960cc043-e25c-4a45-8272-c92d70a8b6f7" containerName="dnsmasq-dns" containerID="cri-o://943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d" gracePeriod=10 Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.422468 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.510535 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-openstack-edpm-ipam\") pod \"960cc043-e25c-4a45-8272-c92d70a8b6f7\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.510694 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-sb\") pod \"960cc043-e25c-4a45-8272-c92d70a8b6f7\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.510729 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-svc\") pod \"960cc043-e25c-4a45-8272-c92d70a8b6f7\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.510917 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-config\") pod \"960cc043-e25c-4a45-8272-c92d70a8b6f7\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.510977 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9g9w\" (UniqueName: \"kubernetes.io/projected/960cc043-e25c-4a45-8272-c92d70a8b6f7-kube-api-access-m9g9w\") pod \"960cc043-e25c-4a45-8272-c92d70a8b6f7\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.511065 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-swift-storage-0\") pod \"960cc043-e25c-4a45-8272-c92d70a8b6f7\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.511090 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-nb\") pod \"960cc043-e25c-4a45-8272-c92d70a8b6f7\" (UID: \"960cc043-e25c-4a45-8272-c92d70a8b6f7\") " Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.520558 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/960cc043-e25c-4a45-8272-c92d70a8b6f7-kube-api-access-m9g9w" (OuterVolumeSpecName: "kube-api-access-m9g9w") pod "960cc043-e25c-4a45-8272-c92d70a8b6f7" (UID: "960cc043-e25c-4a45-8272-c92d70a8b6f7"). InnerVolumeSpecName "kube-api-access-m9g9w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.581987 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "960cc043-e25c-4a45-8272-c92d70a8b6f7" (UID: "960cc043-e25c-4a45-8272-c92d70a8b6f7"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.582235 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "960cc043-e25c-4a45-8272-c92d70a8b6f7" (UID: "960cc043-e25c-4a45-8272-c92d70a8b6f7"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.596441 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "960cc043-e25c-4a45-8272-c92d70a8b6f7" (UID: "960cc043-e25c-4a45-8272-c92d70a8b6f7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.598800 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "960cc043-e25c-4a45-8272-c92d70a8b6f7" (UID: "960cc043-e25c-4a45-8272-c92d70a8b6f7"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.605178 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-config" (OuterVolumeSpecName: "config") pod "960cc043-e25c-4a45-8272-c92d70a8b6f7" (UID: "960cc043-e25c-4a45-8272-c92d70a8b6f7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.606194 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "960cc043-e25c-4a45-8272-c92d70a8b6f7" (UID: "960cc043-e25c-4a45-8272-c92d70a8b6f7"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.614371 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.614417 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.614429 4910 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.614439 4910 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-config\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.614448 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9g9w\" (UniqueName: \"kubernetes.io/projected/960cc043-e25c-4a45-8272-c92d70a8b6f7-kube-api-access-m9g9w\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.614459 4910 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.614468 4910 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/960cc043-e25c-4a45-8272-c92d70a8b6f7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.851932 4910 generic.go:334] "Generic (PLEG): container finished" podID="960cc043-e25c-4a45-8272-c92d70a8b6f7" containerID="943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d" exitCode=0 Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.852060 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.852035 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" event={"ID":"960cc043-e25c-4a45-8272-c92d70a8b6f7","Type":"ContainerDied","Data":"943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d"} Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.852267 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5576978c7c-kf4tg" event={"ID":"960cc043-e25c-4a45-8272-c92d70a8b6f7","Type":"ContainerDied","Data":"e9c73816c11a115b02c352da1a63d9cc44d5263496e0258d1dc81c11f01ed912"} Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.852293 4910 scope.go:117] "RemoveContainer" containerID="943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.881081 4910 scope.go:117] "RemoveContainer" containerID="0c2fadfefb7e4c1f3a61f5d5375f0f018ecfd4ed6cfe301efa428c6048899643" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.900391 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kf4tg"] Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.907402 4910 scope.go:117] "RemoveContainer" containerID="943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.907780 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5576978c7c-kf4tg"] Nov 25 21:51:44 crc kubenswrapper[4910]: E1125 21:51:44.907862 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d\": container with ID starting with 943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d not found: ID does not exist" containerID="943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.907935 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d"} err="failed to get container status \"943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d\": rpc error: code = NotFound desc = could not find container \"943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d\": container with ID starting with 943c8ef3e1557f53d818e9f5e76e16b2f8d63f55ec21ece35a4748fb3599224d not found: ID does not exist" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.907970 4910 scope.go:117] "RemoveContainer" containerID="0c2fadfefb7e4c1f3a61f5d5375f0f018ecfd4ed6cfe301efa428c6048899643" Nov 25 21:51:44 crc kubenswrapper[4910]: E1125 21:51:44.908395 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0c2fadfefb7e4c1f3a61f5d5375f0f018ecfd4ed6cfe301efa428c6048899643\": container with ID starting with 0c2fadfefb7e4c1f3a61f5d5375f0f018ecfd4ed6cfe301efa428c6048899643 not found: ID does not exist" containerID="0c2fadfefb7e4c1f3a61f5d5375f0f018ecfd4ed6cfe301efa428c6048899643" Nov 25 21:51:44 crc kubenswrapper[4910]: I1125 21:51:44.908475 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0c2fadfefb7e4c1f3a61f5d5375f0f018ecfd4ed6cfe301efa428c6048899643"} err="failed to get container status 
\"0c2fadfefb7e4c1f3a61f5d5375f0f018ecfd4ed6cfe301efa428c6048899643\": rpc error: code = NotFound desc = could not find container \"0c2fadfefb7e4c1f3a61f5d5375f0f018ecfd4ed6cfe301efa428c6048899643\": container with ID starting with 0c2fadfefb7e4c1f3a61f5d5375f0f018ecfd4ed6cfe301efa428c6048899643 not found: ID does not exist" Nov 25 21:51:45 crc kubenswrapper[4910]: I1125 21:51:45.221890 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="960cc043-e25c-4a45-8272-c92d70a8b6f7" path="/var/lib/kubelet/pods/960cc043-e25c-4a45-8272-c92d70a8b6f7/volumes" Nov 25 21:51:53 crc kubenswrapper[4910]: I1125 21:51:53.099700 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:51:53 crc kubenswrapper[4910]: I1125 21:51:53.100417 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:51:59 crc kubenswrapper[4910]: I1125 21:51:59.035765 4910 generic.go:334] "Generic (PLEG): container finished" podID="9d06ec4c-6e1e-4fc9-9e41-59857b4494fd" containerID="f8d9ece16af3d3308e5221d9e5fe3584afdf999926ac1764284f4b6975a07cb8" exitCode=0 Nov 25 21:51:59 crc kubenswrapper[4910]: I1125 21:51:59.035862 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd","Type":"ContainerDied","Data":"f8d9ece16af3d3308e5221d9e5fe3584afdf999926ac1764284f4b6975a07cb8"} Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.047109 4910 generic.go:334] "Generic (PLEG): container finished" podID="bc2bbda0-2d3e-4794-bc13-21bca025c6fe" containerID="2e47eefdc4f5d9c8ca9e32296e0a42206e3b7c33f4a0722318380512a5090ba5" exitCode=0 Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.047227 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bc2bbda0-2d3e-4794-bc13-21bca025c6fe","Type":"ContainerDied","Data":"2e47eefdc4f5d9c8ca9e32296e0a42206e3b7c33f4a0722318380512a5090ba5"} Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.051939 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"9d06ec4c-6e1e-4fc9-9e41-59857b4494fd","Type":"ContainerStarted","Data":"fc33dd38860181b28a9b74df6cb54b6be887e0f44baa3ffb6f8cb8447816b15f"} Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.052129 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.120273 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.12020372 podStartE2EDuration="38.12020372s" podCreationTimestamp="2025-11-25 21:51:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:52:00.119516322 +0000 UTC m=+1275.581992664" watchObservedRunningTime="2025-11-25 21:52:00.12020372 +0000 UTC m=+1275.582680052" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.521753 4910 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv"] Nov 25 21:52:00 crc kubenswrapper[4910]: E1125 21:52:00.522324 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960cc043-e25c-4a45-8272-c92d70a8b6f7" containerName="init" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.522348 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="960cc043-e25c-4a45-8272-c92d70a8b6f7" containerName="init" Nov 25 21:52:00 crc kubenswrapper[4910]: E1125 21:52:00.522385 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2707588d-f101-4cbd-a3cb-cb6366cb0231" containerName="dnsmasq-dns" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.522395 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2707588d-f101-4cbd-a3cb-cb6366cb0231" containerName="dnsmasq-dns" Nov 25 21:52:00 crc kubenswrapper[4910]: E1125 21:52:00.522425 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="960cc043-e25c-4a45-8272-c92d70a8b6f7" containerName="dnsmasq-dns" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.522435 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="960cc043-e25c-4a45-8272-c92d70a8b6f7" containerName="dnsmasq-dns" Nov 25 21:52:00 crc kubenswrapper[4910]: E1125 21:52:00.522457 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2707588d-f101-4cbd-a3cb-cb6366cb0231" containerName="init" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.522467 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2707588d-f101-4cbd-a3cb-cb6366cb0231" containerName="init" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.522709 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2707588d-f101-4cbd-a3cb-cb6366cb0231" containerName="dnsmasq-dns" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.522741 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="960cc043-e25c-4a45-8272-c92d70a8b6f7" containerName="dnsmasq-dns" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.523629 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.528574 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.528837 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.528988 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.529140 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.543943 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv"] Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.607715 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5jmh\" (UniqueName: \"kubernetes.io/projected/9cd37c71-fc60-4099-a183-6f9e8a918e1e-kube-api-access-x5jmh\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.607778 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.607878 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.607925 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.710966 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.711036 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-ssh-key\") pod 
\"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.711152 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5jmh\" (UniqueName: \"kubernetes.io/projected/9cd37c71-fc60-4099-a183-6f9e8a918e1e-kube-api-access-x5jmh\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.711213 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.720359 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.720566 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.721061 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.736449 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5jmh\" (UniqueName: \"kubernetes.io/projected/9cd37c71-fc60-4099-a183-6f9e8a918e1e-kube-api-access-x5jmh\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:00 crc kubenswrapper[4910]: I1125 21:52:00.873857 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:01 crc kubenswrapper[4910]: I1125 21:52:01.077527 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"bc2bbda0-2d3e-4794-bc13-21bca025c6fe","Type":"ContainerStarted","Data":"df5576edd242f4351bc437e1e99d26daee652fe6229a4a6a1191d9499ef6ef85"} Nov 25 21:52:01 crc kubenswrapper[4910]: I1125 21:52:01.078383 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 25 21:52:01 crc kubenswrapper[4910]: I1125 21:52:01.113490 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.113464701 podStartE2EDuration="38.113464701s" podCreationTimestamp="2025-11-25 21:51:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 21:52:01.110997826 +0000 UTC m=+1276.573474168" watchObservedRunningTime="2025-11-25 21:52:01.113464701 +0000 UTC m=+1276.575941023" Nov 25 21:52:01 crc kubenswrapper[4910]: I1125 21:52:01.516023 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv"] Nov 25 21:52:01 crc kubenswrapper[4910]: W1125 21:52:01.529474 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9cd37c71_fc60_4099_a183_6f9e8a918e1e.slice/crio-c8facce95dd146d6d738ce28909e6903a370e1b519063e0a4a0a4bfaa98234bf WatchSource:0}: Error finding container c8facce95dd146d6d738ce28909e6903a370e1b519063e0a4a0a4bfaa98234bf: Status 404 returned error can't find the container with id c8facce95dd146d6d738ce28909e6903a370e1b519063e0a4a0a4bfaa98234bf Nov 25 21:52:02 crc kubenswrapper[4910]: I1125 21:52:02.090010 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" event={"ID":"9cd37c71-fc60-4099-a183-6f9e8a918e1e","Type":"ContainerStarted","Data":"c8facce95dd146d6d738ce28909e6903a370e1b519063e0a4a0a4bfaa98234bf"} Nov 25 21:52:11 crc kubenswrapper[4910]: I1125 21:52:11.187620 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" event={"ID":"9cd37c71-fc60-4099-a183-6f9e8a918e1e","Type":"ContainerStarted","Data":"4ea7b3c929cacd8c70ec6f5ac8b7212f090c964f4fe2dfb5c1d3c4e1fe9d179b"} Nov 25 21:52:11 crc kubenswrapper[4910]: I1125 21:52:11.213236 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" podStartSLOduration=2.010704806 podStartE2EDuration="11.213218026s" podCreationTimestamp="2025-11-25 21:52:00 +0000 UTC" firstStartedPulling="2025-11-25 21:52:01.532154767 +0000 UTC m=+1276.994631089" lastFinishedPulling="2025-11-25 21:52:10.734667987 +0000 UTC m=+1286.197144309" observedRunningTime="2025-11-25 21:52:11.205923875 +0000 UTC m=+1286.668400267" watchObservedRunningTime="2025-11-25 21:52:11.213218026 +0000 UTC m=+1286.675694348" Nov 25 21:52:13 crc kubenswrapper[4910]: I1125 21:52:13.154564 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 25 21:52:13 crc kubenswrapper[4910]: I1125 21:52:13.999431 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 
25 21:52:23 crc kubenswrapper[4910]: I1125 21:52:23.098848 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:52:23 crc kubenswrapper[4910]: I1125 21:52:23.099391 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:52:23 crc kubenswrapper[4910]: I1125 21:52:23.099442 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:52:23 crc kubenswrapper[4910]: I1125 21:52:23.100349 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7b6cad0a631fec9eb58a49d3ce7f8c662dc70b5fec077e96f2c93ef3235ee8bf"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 21:52:23 crc kubenswrapper[4910]: I1125 21:52:23.100409 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://7b6cad0a631fec9eb58a49d3ce7f8c662dc70b5fec077e96f2c93ef3235ee8bf" gracePeriod=600 Nov 25 21:52:23 crc kubenswrapper[4910]: I1125 21:52:23.361444 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="7b6cad0a631fec9eb58a49d3ce7f8c662dc70b5fec077e96f2c93ef3235ee8bf" exitCode=0 Nov 25 21:52:23 crc kubenswrapper[4910]: I1125 21:52:23.361540 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"7b6cad0a631fec9eb58a49d3ce7f8c662dc70b5fec077e96f2c93ef3235ee8bf"} Nov 25 21:52:23 crc kubenswrapper[4910]: I1125 21:52:23.361656 4910 scope.go:117] "RemoveContainer" containerID="0b0066f1a169222bc8e764ac54716c8dcd57922f8eb880531d5e609e43cc685c" Nov 25 21:52:23 crc kubenswrapper[4910]: I1125 21:52:23.364729 4910 generic.go:334] "Generic (PLEG): container finished" podID="9cd37c71-fc60-4099-a183-6f9e8a918e1e" containerID="4ea7b3c929cacd8c70ec6f5ac8b7212f090c964f4fe2dfb5c1d3c4e1fe9d179b" exitCode=0 Nov 25 21:52:23 crc kubenswrapper[4910]: I1125 21:52:23.364804 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" event={"ID":"9cd37c71-fc60-4099-a183-6f9e8a918e1e","Type":"ContainerDied","Data":"4ea7b3c929cacd8c70ec6f5ac8b7212f090c964f4fe2dfb5c1d3c4e1fe9d179b"} Nov 25 21:52:24 crc kubenswrapper[4910]: I1125 21:52:24.380482 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"} Nov 25 21:52:24 crc kubenswrapper[4910]: I1125 21:52:24.872836 4910 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.017801 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-ssh-key\") pod \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.017855 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-inventory\") pod \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.017887 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-repo-setup-combined-ca-bundle\") pod \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.017935 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5jmh\" (UniqueName: \"kubernetes.io/projected/9cd37c71-fc60-4099-a183-6f9e8a918e1e-kube-api-access-x5jmh\") pod \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\" (UID: \"9cd37c71-fc60-4099-a183-6f9e8a918e1e\") " Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.025201 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9cd37c71-fc60-4099-a183-6f9e8a918e1e-kube-api-access-x5jmh" (OuterVolumeSpecName: "kube-api-access-x5jmh") pod "9cd37c71-fc60-4099-a183-6f9e8a918e1e" (UID: "9cd37c71-fc60-4099-a183-6f9e8a918e1e"). InnerVolumeSpecName "kube-api-access-x5jmh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.028491 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "9cd37c71-fc60-4099-a183-6f9e8a918e1e" (UID: "9cd37c71-fc60-4099-a183-6f9e8a918e1e"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.057382 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "9cd37c71-fc60-4099-a183-6f9e8a918e1e" (UID: "9cd37c71-fc60-4099-a183-6f9e8a918e1e"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.057993 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-inventory" (OuterVolumeSpecName: "inventory") pod "9cd37c71-fc60-4099-a183-6f9e8a918e1e" (UID: "9cd37c71-fc60-4099-a183-6f9e8a918e1e"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.121137 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.121174 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.121191 4910 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9cd37c71-fc60-4099-a183-6f9e8a918e1e-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.121205 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5jmh\" (UniqueName: \"kubernetes.io/projected/9cd37c71-fc60-4099-a183-6f9e8a918e1e-kube-api-access-x5jmh\") on node \"crc\" DevicePath \"\"" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.390624 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.390619 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv" event={"ID":"9cd37c71-fc60-4099-a183-6f9e8a918e1e","Type":"ContainerDied","Data":"c8facce95dd146d6d738ce28909e6903a370e1b519063e0a4a0a4bfaa98234bf"} Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.390695 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c8facce95dd146d6d738ce28909e6903a370e1b519063e0a4a0a4bfaa98234bf" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.534192 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2"] Nov 25 21:52:25 crc kubenswrapper[4910]: E1125 21:52:25.535317 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9cd37c71-fc60-4099-a183-6f9e8a918e1e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.535812 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9cd37c71-fc60-4099-a183-6f9e8a918e1e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.536092 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9cd37c71-fc60-4099-a183-6f9e8a918e1e" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.537137 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.550302 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.550372 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.550519 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.550571 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.553326 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2"] Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.631948 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4z9c2\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.632044 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9hpw\" (UniqueName: \"kubernetes.io/projected/444f986e-2346-419a-a78e-584196602880-kube-api-access-p9hpw\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4z9c2\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.632086 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4z9c2\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.734505 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4z9c2\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.734653 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9hpw\" (UniqueName: \"kubernetes.io/projected/444f986e-2346-419a-a78e-584196602880-kube-api-access-p9hpw\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4z9c2\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.735200 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4z9c2\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " 
pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.742955 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4z9c2\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.743024 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4z9c2\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.754199 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9hpw\" (UniqueName: \"kubernetes.io/projected/444f986e-2346-419a-a78e-584196602880-kube-api-access-p9hpw\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-4z9c2\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:25 crc kubenswrapper[4910]: I1125 21:52:25.875851 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:26 crc kubenswrapper[4910]: I1125 21:52:26.522124 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2"] Nov 25 21:52:26 crc kubenswrapper[4910]: W1125 21:52:26.523550 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod444f986e_2346_419a_a78e_584196602880.slice/crio-fe0c39f3ba8b129d02f5b441e2473f259d2c2428fc1b6c55e74646e445c44f41 WatchSource:0}: Error finding container fe0c39f3ba8b129d02f5b441e2473f259d2c2428fc1b6c55e74646e445c44f41: Status 404 returned error can't find the container with id fe0c39f3ba8b129d02f5b441e2473f259d2c2428fc1b6c55e74646e445c44f41 Nov 25 21:52:27 crc kubenswrapper[4910]: I1125 21:52:27.421644 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" event={"ID":"444f986e-2346-419a-a78e-584196602880","Type":"ContainerStarted","Data":"c5ed13bf7f72beb5a03372447f9d866a5f9af26b49ee76ab3a2ae8164bbe5424"} Nov 25 21:52:27 crc kubenswrapper[4910]: I1125 21:52:27.422308 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" event={"ID":"444f986e-2346-419a-a78e-584196602880","Type":"ContainerStarted","Data":"fe0c39f3ba8b129d02f5b441e2473f259d2c2428fc1b6c55e74646e445c44f41"} Nov 25 21:52:27 crc kubenswrapper[4910]: I1125 21:52:27.442030 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" podStartSLOduration=2.029663455 podStartE2EDuration="2.442008566s" podCreationTimestamp="2025-11-25 21:52:25 +0000 UTC" firstStartedPulling="2025-11-25 21:52:26.526218427 +0000 UTC m=+1301.988694749" lastFinishedPulling="2025-11-25 21:52:26.938563538 +0000 UTC m=+1302.401039860" observedRunningTime="2025-11-25 21:52:27.439818909 +0000 UTC m=+1302.902295261" watchObservedRunningTime="2025-11-25 21:52:27.442008566 +0000 UTC 
m=+1302.904484898" Nov 25 21:52:30 crc kubenswrapper[4910]: I1125 21:52:30.454361 4910 generic.go:334] "Generic (PLEG): container finished" podID="444f986e-2346-419a-a78e-584196602880" containerID="c5ed13bf7f72beb5a03372447f9d866a5f9af26b49ee76ab3a2ae8164bbe5424" exitCode=0 Nov 25 21:52:30 crc kubenswrapper[4910]: I1125 21:52:30.454459 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" event={"ID":"444f986e-2346-419a-a78e-584196602880","Type":"ContainerDied","Data":"c5ed13bf7f72beb5a03372447f9d866a5f9af26b49ee76ab3a2ae8164bbe5424"} Nov 25 21:52:31 crc kubenswrapper[4910]: I1125 21:52:31.989292 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.085132 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-inventory\") pod \"444f986e-2346-419a-a78e-584196602880\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.085296 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9hpw\" (UniqueName: \"kubernetes.io/projected/444f986e-2346-419a-a78e-584196602880-kube-api-access-p9hpw\") pod \"444f986e-2346-419a-a78e-584196602880\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.085444 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-ssh-key\") pod \"444f986e-2346-419a-a78e-584196602880\" (UID: \"444f986e-2346-419a-a78e-584196602880\") " Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.092607 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/444f986e-2346-419a-a78e-584196602880-kube-api-access-p9hpw" (OuterVolumeSpecName: "kube-api-access-p9hpw") pod "444f986e-2346-419a-a78e-584196602880" (UID: "444f986e-2346-419a-a78e-584196602880"). InnerVolumeSpecName "kube-api-access-p9hpw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.120353 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "444f986e-2346-419a-a78e-584196602880" (UID: "444f986e-2346-419a-a78e-584196602880"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.142484 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-inventory" (OuterVolumeSpecName: "inventory") pod "444f986e-2346-419a-a78e-584196602880" (UID: "444f986e-2346-419a-a78e-584196602880"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.190788 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.190839 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/444f986e-2346-419a-a78e-584196602880-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.190924 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9hpw\" (UniqueName: \"kubernetes.io/projected/444f986e-2346-419a-a78e-584196602880-kube-api-access-p9hpw\") on node \"crc\" DevicePath \"\"" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.480704 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" event={"ID":"444f986e-2346-419a-a78e-584196602880","Type":"ContainerDied","Data":"fe0c39f3ba8b129d02f5b441e2473f259d2c2428fc1b6c55e74646e445c44f41"} Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.481075 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe0c39f3ba8b129d02f5b441e2473f259d2c2428fc1b6c55e74646e445c44f41" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.480830 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-4z9c2" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.582194 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh"] Nov 25 21:52:32 crc kubenswrapper[4910]: E1125 21:52:32.582749 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="444f986e-2346-419a-a78e-584196602880" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.582773 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="444f986e-2346-419a-a78e-584196602880" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.582962 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="444f986e-2346-419a-a78e-584196602880" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.583637 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.586654 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.587358 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.588088 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.590573 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.612531 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh"] Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.701258 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4zvw\" (UniqueName: \"kubernetes.io/projected/6fbaf31f-bfe9-4f0a-a064-75d015480249-kube-api-access-q4zvw\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.701447 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.701683 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.701821 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.803665 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4zvw\" (UniqueName: \"kubernetes.io/projected/6fbaf31f-bfe9-4f0a-a064-75d015480249-kube-api-access-q4zvw\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.803800 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-bootstrap-combined-ca-bundle\") 
pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.803953 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.804079 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.809745 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.809908 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.812160 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.823636 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4zvw\" (UniqueName: \"kubernetes.io/projected/6fbaf31f-bfe9-4f0a-a064-75d015480249-kube-api-access-q4zvw\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:32 crc kubenswrapper[4910]: I1125 21:52:32.912781 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:52:33 crc kubenswrapper[4910]: I1125 21:52:33.589983 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh"] Nov 25 21:52:34 crc kubenswrapper[4910]: I1125 21:52:34.504092 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" event={"ID":"6fbaf31f-bfe9-4f0a-a064-75d015480249","Type":"ContainerStarted","Data":"746988820a64cee990defcbd42892fbeace3fa4595e78a4b68fdde15bc6e4121"} Nov 25 21:52:34 crc kubenswrapper[4910]: I1125 21:52:34.504704 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" event={"ID":"6fbaf31f-bfe9-4f0a-a064-75d015480249","Type":"ContainerStarted","Data":"3e13d6808af9fcf2ca06592293e92615ab4defe5b8da8623b641db97a98583c6"} Nov 25 21:52:34 crc kubenswrapper[4910]: I1125 21:52:34.551215 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" podStartSLOduration=2.088776081 podStartE2EDuration="2.551179248s" podCreationTimestamp="2025-11-25 21:52:32 +0000 UTC" firstStartedPulling="2025-11-25 21:52:33.602807159 +0000 UTC m=+1309.065283481" lastFinishedPulling="2025-11-25 21:52:34.065210326 +0000 UTC m=+1309.527686648" observedRunningTime="2025-11-25 21:52:34.527981552 +0000 UTC m=+1309.990457914" watchObservedRunningTime="2025-11-25 21:52:34.551179248 +0000 UTC m=+1310.013655610" Nov 25 21:52:52 crc kubenswrapper[4910]: I1125 21:52:52.255046 4910 scope.go:117] "RemoveContainer" containerID="8e96aacdf07d0b27e96b90e76b88ed369713b57159939705eb1e0e2a335f581a" Nov 25 21:52:52 crc kubenswrapper[4910]: I1125 21:52:52.281416 4910 scope.go:117] "RemoveContainer" containerID="36f5cd42b631fd69c9e5846f353e8f21ae91761d6f2cb3d74208d623a99873e2" Nov 25 21:52:52 crc kubenswrapper[4910]: I1125 21:52:52.329302 4910 scope.go:117] "RemoveContainer" containerID="e232ffbaf837378206e41067e3de37b088fac4e91500257e7ba0ca8131633841" Nov 25 21:53:52 crc kubenswrapper[4910]: I1125 21:53:52.439472 4910 scope.go:117] "RemoveContainer" containerID="f3d49d6ebcb61f7bf1289baffc3034331ccbfda0ff3a34702beed6e0c21595be" Nov 25 21:54:23 crc kubenswrapper[4910]: I1125 21:54:23.098939 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:54:23 crc kubenswrapper[4910]: I1125 21:54:23.100054 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:54:53 crc kubenswrapper[4910]: I1125 21:54:53.099012 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:54:53 crc kubenswrapper[4910]: I1125 21:54:53.099867 4910 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.730717 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gllm2"] Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.735292 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.746822 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gllm2"] Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.810534 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-utilities\") pod \"certified-operators-gllm2\" (UID: \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.810818 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-catalog-content\") pod \"certified-operators-gllm2\" (UID: \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.810981 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-br4sp\" (UniqueName: \"kubernetes.io/projected/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-kube-api-access-br4sp\") pod \"certified-operators-gllm2\" (UID: \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.914642 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-utilities\") pod \"certified-operators-gllm2\" (UID: \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.914801 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-catalog-content\") pod \"certified-operators-gllm2\" (UID: \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.914875 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-br4sp\" (UniqueName: \"kubernetes.io/projected/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-kube-api-access-br4sp\") pod \"certified-operators-gllm2\" (UID: \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.915790 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-utilities\") pod \"certified-operators-gllm2\" (UID: 
\"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.916429 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-catalog-content\") pod \"certified-operators-gllm2\" (UID: \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:00 crc kubenswrapper[4910]: I1125 21:55:00.937567 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-br4sp\" (UniqueName: \"kubernetes.io/projected/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-kube-api-access-br4sp\") pod \"certified-operators-gllm2\" (UID: \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:01 crc kubenswrapper[4910]: I1125 21:55:01.064638 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:01 crc kubenswrapper[4910]: I1125 21:55:01.611762 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gllm2"] Nov 25 21:55:02 crc kubenswrapper[4910]: I1125 21:55:02.412235 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gllm2" event={"ID":"4c1f7391-bfee-48fc-8d5e-ab2620f154e7","Type":"ContainerDied","Data":"5cc2f0ef175bf960d7aced67f69f0665b0513bfee4c842d3cb02cf52a0dfe5ff"} Nov 25 21:55:02 crc kubenswrapper[4910]: I1125 21:55:02.412155 4910 generic.go:334] "Generic (PLEG): container finished" podID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" containerID="5cc2f0ef175bf960d7aced67f69f0665b0513bfee4c842d3cb02cf52a0dfe5ff" exitCode=0 Nov 25 21:55:02 crc kubenswrapper[4910]: I1125 21:55:02.412897 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gllm2" event={"ID":"4c1f7391-bfee-48fc-8d5e-ab2620f154e7","Type":"ContainerStarted","Data":"e1277b2729396e29c162378ef58d64205acf8aa4b446f4d10eae684280f05d8b"} Nov 25 21:55:03 crc kubenswrapper[4910]: I1125 21:55:03.426617 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gllm2" event={"ID":"4c1f7391-bfee-48fc-8d5e-ab2620f154e7","Type":"ContainerStarted","Data":"c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397"} Nov 25 21:55:04 crc kubenswrapper[4910]: I1125 21:55:04.453770 4910 generic.go:334] "Generic (PLEG): container finished" podID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" containerID="c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397" exitCode=0 Nov 25 21:55:04 crc kubenswrapper[4910]: I1125 21:55:04.453823 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gllm2" event={"ID":"4c1f7391-bfee-48fc-8d5e-ab2620f154e7","Type":"ContainerDied","Data":"c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397"} Nov 25 21:55:06 crc kubenswrapper[4910]: I1125 21:55:06.486546 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gllm2" event={"ID":"4c1f7391-bfee-48fc-8d5e-ab2620f154e7","Type":"ContainerStarted","Data":"8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f"} Nov 25 21:55:06 crc kubenswrapper[4910]: I1125 21:55:06.534852 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/certified-operators-gllm2" podStartSLOduration=3.717411356 podStartE2EDuration="6.53481649s" podCreationTimestamp="2025-11-25 21:55:00 +0000 UTC" firstStartedPulling="2025-11-25 21:55:02.416290291 +0000 UTC m=+1457.878766613" lastFinishedPulling="2025-11-25 21:55:05.233695415 +0000 UTC m=+1460.696171747" observedRunningTime="2025-11-25 21:55:06.518425361 +0000 UTC m=+1461.980901693" watchObservedRunningTime="2025-11-25 21:55:06.53481649 +0000 UTC m=+1461.997292842" Nov 25 21:55:11 crc kubenswrapper[4910]: I1125 21:55:11.065209 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:11 crc kubenswrapper[4910]: I1125 21:55:11.066453 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:11 crc kubenswrapper[4910]: I1125 21:55:11.150037 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:11 crc kubenswrapper[4910]: I1125 21:55:11.649351 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:11 crc kubenswrapper[4910]: I1125 21:55:11.736071 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gllm2"] Nov 25 21:55:13 crc kubenswrapper[4910]: I1125 21:55:13.584530 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gllm2" podUID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" containerName="registry-server" containerID="cri-o://8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f" gracePeriod=2 Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.100134 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.201366 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-utilities\") pod \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\" (UID: \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.201530 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-catalog-content\") pod \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\" (UID: \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.201843 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-br4sp\" (UniqueName: \"kubernetes.io/projected/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-kube-api-access-br4sp\") pod \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\" (UID: \"4c1f7391-bfee-48fc-8d5e-ab2620f154e7\") " Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.202936 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-utilities" (OuterVolumeSpecName: "utilities") pod "4c1f7391-bfee-48fc-8d5e-ab2620f154e7" (UID: "4c1f7391-bfee-48fc-8d5e-ab2620f154e7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.211555 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-kube-api-access-br4sp" (OuterVolumeSpecName: "kube-api-access-br4sp") pod "4c1f7391-bfee-48fc-8d5e-ab2620f154e7" (UID: "4c1f7391-bfee-48fc-8d5e-ab2620f154e7"). InnerVolumeSpecName "kube-api-access-br4sp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.257595 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4c1f7391-bfee-48fc-8d5e-ab2620f154e7" (UID: "4c1f7391-bfee-48fc-8d5e-ab2620f154e7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.305628 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.305676 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.305688 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-br4sp\" (UniqueName: \"kubernetes.io/projected/4c1f7391-bfee-48fc-8d5e-ab2620f154e7-kube-api-access-br4sp\") on node \"crc\" DevicePath \"\"" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.626090 4910 generic.go:334] "Generic (PLEG): container finished" podID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" containerID="8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f" exitCode=0 Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.626157 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gllm2" event={"ID":"4c1f7391-bfee-48fc-8d5e-ab2620f154e7","Type":"ContainerDied","Data":"8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f"} Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.626211 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gllm2" event={"ID":"4c1f7391-bfee-48fc-8d5e-ab2620f154e7","Type":"ContainerDied","Data":"e1277b2729396e29c162378ef58d64205acf8aa4b446f4d10eae684280f05d8b"} Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.626199 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gllm2" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.626315 4910 scope.go:117] "RemoveContainer" containerID="8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.681575 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gllm2"] Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.681986 4910 scope.go:117] "RemoveContainer" containerID="c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.700671 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gllm2"] Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.727814 4910 scope.go:117] "RemoveContainer" containerID="5cc2f0ef175bf960d7aced67f69f0665b0513bfee4c842d3cb02cf52a0dfe5ff" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.790046 4910 scope.go:117] "RemoveContainer" containerID="8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f" Nov 25 21:55:14 crc kubenswrapper[4910]: E1125 21:55:14.794270 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f\": container with ID starting with 8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f not found: ID does not exist" containerID="8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.794324 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f"} err="failed to get container status \"8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f\": rpc error: code = NotFound desc = could not find container \"8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f\": container with ID starting with 8dc8d29d05f75261e274507be9f9700a871f3012d0a3ae9835b2ea6d794e930f not found: ID does not exist" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.794359 4910 scope.go:117] "RemoveContainer" containerID="c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397" Nov 25 21:55:14 crc kubenswrapper[4910]: E1125 21:55:14.797486 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397\": container with ID starting with c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397 not found: ID does not exist" containerID="c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.797520 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397"} err="failed to get container status \"c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397\": rpc error: code = NotFound desc = could not find container \"c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397\": container with ID starting with c29ccb1e1b4d300bd6ac86e358ce95ebebb3ab930880d8ec380c667a0ee67397 not found: ID does not exist" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.797538 4910 scope.go:117] "RemoveContainer" 
containerID="5cc2f0ef175bf960d7aced67f69f0665b0513bfee4c842d3cb02cf52a0dfe5ff" Nov 25 21:55:14 crc kubenswrapper[4910]: E1125 21:55:14.798119 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cc2f0ef175bf960d7aced67f69f0665b0513bfee4c842d3cb02cf52a0dfe5ff\": container with ID starting with 5cc2f0ef175bf960d7aced67f69f0665b0513bfee4c842d3cb02cf52a0dfe5ff not found: ID does not exist" containerID="5cc2f0ef175bf960d7aced67f69f0665b0513bfee4c842d3cb02cf52a0dfe5ff" Nov 25 21:55:14 crc kubenswrapper[4910]: I1125 21:55:14.798198 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cc2f0ef175bf960d7aced67f69f0665b0513bfee4c842d3cb02cf52a0dfe5ff"} err="failed to get container status \"5cc2f0ef175bf960d7aced67f69f0665b0513bfee4c842d3cb02cf52a0dfe5ff\": rpc error: code = NotFound desc = could not find container \"5cc2f0ef175bf960d7aced67f69f0665b0513bfee4c842d3cb02cf52a0dfe5ff\": container with ID starting with 5cc2f0ef175bf960d7aced67f69f0665b0513bfee4c842d3cb02cf52a0dfe5ff not found: ID does not exist" Nov 25 21:55:15 crc kubenswrapper[4910]: I1125 21:55:15.225559 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" path="/var/lib/kubelet/pods/4c1f7391-bfee-48fc-8d5e-ab2620f154e7/volumes" Nov 25 21:55:23 crc kubenswrapper[4910]: I1125 21:55:23.099219 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 21:55:23 crc kubenswrapper[4910]: I1125 21:55:23.099672 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 21:55:23 crc kubenswrapper[4910]: I1125 21:55:23.099739 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 21:55:23 crc kubenswrapper[4910]: I1125 21:55:23.100993 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 21:55:23 crc kubenswrapper[4910]: I1125 21:55:23.101094 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" gracePeriod=600 Nov 25 21:55:23 crc kubenswrapper[4910]: E1125 21:55:23.328212 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:55:23 crc kubenswrapper[4910]: I1125 21:55:23.735992 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" exitCode=0 Nov 25 21:55:23 crc kubenswrapper[4910]: I1125 21:55:23.736077 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"} Nov 25 21:55:23 crc kubenswrapper[4910]: I1125 21:55:23.736164 4910 scope.go:117] "RemoveContainer" containerID="7b6cad0a631fec9eb58a49d3ce7f8c662dc70b5fec077e96f2c93ef3235ee8bf" Nov 25 21:55:23 crc kubenswrapper[4910]: I1125 21:55:23.737760 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:55:23 crc kubenswrapper[4910]: E1125 21:55:23.738588 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.586669 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-k67mw"] Nov 25 21:55:26 crc kubenswrapper[4910]: E1125 21:55:26.588187 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" containerName="registry-server" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.588226 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" containerName="registry-server" Nov 25 21:55:26 crc kubenswrapper[4910]: E1125 21:55:26.588305 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" containerName="extract-content" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.588325 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" containerName="extract-content" Nov 25 21:55:26 crc kubenswrapper[4910]: E1125 21:55:26.588374 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" containerName="extract-utilities" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.588395 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" containerName="extract-utilities" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.588885 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c1f7391-bfee-48fc-8d5e-ab2620f154e7" containerName="registry-server" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.592843 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.601154 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k67mw"] Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.749510 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-catalog-content\") pod \"community-operators-k67mw\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.750006 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-utilities\") pod \"community-operators-k67mw\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.750054 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2w655\" (UniqueName: \"kubernetes.io/projected/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-kube-api-access-2w655\") pod \"community-operators-k67mw\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.851844 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-catalog-content\") pod \"community-operators-k67mw\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.851908 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-utilities\") pod \"community-operators-k67mw\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.851959 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2w655\" (UniqueName: \"kubernetes.io/projected/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-kube-api-access-2w655\") pod \"community-operators-k67mw\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.852601 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-utilities\") pod \"community-operators-k67mw\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.852702 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-catalog-content\") pod \"community-operators-k67mw\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.879104 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2w655\" (UniqueName: \"kubernetes.io/projected/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-kube-api-access-2w655\") pod \"community-operators-k67mw\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:26 crc kubenswrapper[4910]: I1125 21:55:26.925320 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:27 crc kubenswrapper[4910]: I1125 21:55:27.515630 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k67mw"] Nov 25 21:55:27 crc kubenswrapper[4910]: I1125 21:55:27.797895 4910 generic.go:334] "Generic (PLEG): container finished" podID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" containerID="d499056675eb10e2896a62c20fcd62d2b9630bcd95326b33de69ee0f6b6639d1" exitCode=0 Nov 25 21:55:27 crc kubenswrapper[4910]: I1125 21:55:27.798027 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k67mw" event={"ID":"29be51cc-860e-42ab-8ab1-5ad31d2c34ba","Type":"ContainerDied","Data":"d499056675eb10e2896a62c20fcd62d2b9630bcd95326b33de69ee0f6b6639d1"} Nov 25 21:55:27 crc kubenswrapper[4910]: I1125 21:55:27.798656 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k67mw" event={"ID":"29be51cc-860e-42ab-8ab1-5ad31d2c34ba","Type":"ContainerStarted","Data":"9452df35e0d5cd2402952320397de916ff80f1d020b888deeb9ebb949fbfb285"} Nov 25 21:55:28 crc kubenswrapper[4910]: I1125 21:55:28.814009 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k67mw" event={"ID":"29be51cc-860e-42ab-8ab1-5ad31d2c34ba","Type":"ContainerStarted","Data":"714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165"} Nov 25 21:55:29 crc kubenswrapper[4910]: I1125 21:55:29.832936 4910 generic.go:334] "Generic (PLEG): container finished" podID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" containerID="714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165" exitCode=0 Nov 25 21:55:29 crc kubenswrapper[4910]: I1125 21:55:29.833010 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k67mw" event={"ID":"29be51cc-860e-42ab-8ab1-5ad31d2c34ba","Type":"ContainerDied","Data":"714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165"} Nov 25 21:55:30 crc kubenswrapper[4910]: I1125 21:55:30.852303 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k67mw" event={"ID":"29be51cc-860e-42ab-8ab1-5ad31d2c34ba","Type":"ContainerStarted","Data":"95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee"} Nov 25 21:55:30 crc kubenswrapper[4910]: I1125 21:55:30.883500 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-k67mw" podStartSLOduration=2.402140874 podStartE2EDuration="4.883465029s" podCreationTimestamp="2025-11-25 21:55:26 +0000 UTC" firstStartedPulling="2025-11-25 21:55:27.804850542 +0000 UTC m=+1483.267326864" lastFinishedPulling="2025-11-25 21:55:30.286174707 +0000 UTC m=+1485.748651019" observedRunningTime="2025-11-25 21:55:30.879456751 +0000 UTC m=+1486.341933103" watchObservedRunningTime="2025-11-25 21:55:30.883465029 +0000 UTC m=+1486.345941401" Nov 25 21:55:36 crc kubenswrapper[4910]: I1125 21:55:36.926502 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:36 crc kubenswrapper[4910]: I1125 21:55:36.927533 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:37 crc kubenswrapper[4910]: I1125 21:55:37.009901 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:37 crc kubenswrapper[4910]: I1125 21:55:37.206207 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:55:37 crc kubenswrapper[4910]: E1125 21:55:37.206642 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:55:38 crc kubenswrapper[4910]: I1125 21:55:38.022502 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:38 crc kubenswrapper[4910]: I1125 21:55:38.099041 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k67mw"] Nov 25 21:55:39 crc kubenswrapper[4910]: I1125 21:55:39.966818 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-k67mw" podUID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" containerName="registry-server" containerID="cri-o://95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee" gracePeriod=2 Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.569176 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.699951 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-catalog-content\") pod \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.700173 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w655\" (UniqueName: \"kubernetes.io/projected/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-kube-api-access-2w655\") pod \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.700294 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-utilities\") pod \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\" (UID: \"29be51cc-860e-42ab-8ab1-5ad31d2c34ba\") " Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.701410 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-utilities" (OuterVolumeSpecName: "utilities") pod "29be51cc-860e-42ab-8ab1-5ad31d2c34ba" (UID: "29be51cc-860e-42ab-8ab1-5ad31d2c34ba"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.727968 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-kube-api-access-2w655" (OuterVolumeSpecName: "kube-api-access-2w655") pod "29be51cc-860e-42ab-8ab1-5ad31d2c34ba" (UID: "29be51cc-860e-42ab-8ab1-5ad31d2c34ba"). InnerVolumeSpecName "kube-api-access-2w655". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.766895 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29be51cc-860e-42ab-8ab1-5ad31d2c34ba" (UID: "29be51cc-860e-42ab-8ab1-5ad31d2c34ba"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.803781 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.803859 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w655\" (UniqueName: \"kubernetes.io/projected/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-kube-api-access-2w655\") on node \"crc\" DevicePath \"\"" Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.803877 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29be51cc-860e-42ab-8ab1-5ad31d2c34ba-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.985195 4910 generic.go:334] "Generic (PLEG): container finished" podID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" containerID="95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee" exitCode=0 Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.985320 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k67mw" Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.985333 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k67mw" event={"ID":"29be51cc-860e-42ab-8ab1-5ad31d2c34ba","Type":"ContainerDied","Data":"95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee"} Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.985951 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k67mw" event={"ID":"29be51cc-860e-42ab-8ab1-5ad31d2c34ba","Type":"ContainerDied","Data":"9452df35e0d5cd2402952320397de916ff80f1d020b888deeb9ebb949fbfb285"} Nov 25 21:55:40 crc kubenswrapper[4910]: I1125 21:55:40.985981 4910 scope.go:117] "RemoveContainer" containerID="95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee" Nov 25 21:55:41 crc kubenswrapper[4910]: I1125 21:55:41.041140 4910 scope.go:117] "RemoveContainer" containerID="714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165" Nov 25 21:55:41 crc kubenswrapper[4910]: I1125 21:55:41.044491 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-k67mw"] Nov 25 21:55:41 crc kubenswrapper[4910]: I1125 21:55:41.055729 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-k67mw"] Nov 25 21:55:41 crc kubenswrapper[4910]: I1125 21:55:41.082305 4910 scope.go:117] "RemoveContainer" containerID="d499056675eb10e2896a62c20fcd62d2b9630bcd95326b33de69ee0f6b6639d1" Nov 25 21:55:41 crc kubenswrapper[4910]: I1125 21:55:41.129373 4910 scope.go:117] "RemoveContainer" containerID="95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee" Nov 25 21:55:41 crc kubenswrapper[4910]: E1125 21:55:41.130006 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee\": container with ID starting with 95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee not found: ID does not exist" containerID="95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee" Nov 25 21:55:41 crc kubenswrapper[4910]: I1125 21:55:41.130079 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee"} err="failed to get container status \"95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee\": rpc error: code = NotFound desc = could not find container \"95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee\": container with ID starting with 95767c299219952c8bac1dbd48eb0f20c14c1a05af3c658ed61d912a9c3ecaee not found: ID does not exist" Nov 25 21:55:41 crc kubenswrapper[4910]: I1125 21:55:41.130117 4910 scope.go:117] "RemoveContainer" containerID="714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165" Nov 25 21:55:41 crc kubenswrapper[4910]: E1125 21:55:41.130589 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165\": container with ID starting with 714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165 not found: ID does not exist" containerID="714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165" Nov 25 21:55:41 crc kubenswrapper[4910]: I1125 21:55:41.130617 4910 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165"} err="failed to get container status \"714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165\": rpc error: code = NotFound desc = could not find container \"714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165\": container with ID starting with 714493e433bab5a19c92ccabe26769676e2763ab0623d54937657c9f0576c165 not found: ID does not exist" Nov 25 21:55:41 crc kubenswrapper[4910]: I1125 21:55:41.130634 4910 scope.go:117] "RemoveContainer" containerID="d499056675eb10e2896a62c20fcd62d2b9630bcd95326b33de69ee0f6b6639d1" Nov 25 21:55:41 crc kubenswrapper[4910]: E1125 21:55:41.130927 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d499056675eb10e2896a62c20fcd62d2b9630bcd95326b33de69ee0f6b6639d1\": container with ID starting with d499056675eb10e2896a62c20fcd62d2b9630bcd95326b33de69ee0f6b6639d1 not found: ID does not exist" containerID="d499056675eb10e2896a62c20fcd62d2b9630bcd95326b33de69ee0f6b6639d1" Nov 25 21:55:41 crc kubenswrapper[4910]: I1125 21:55:41.130969 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d499056675eb10e2896a62c20fcd62d2b9630bcd95326b33de69ee0f6b6639d1"} err="failed to get container status \"d499056675eb10e2896a62c20fcd62d2b9630bcd95326b33de69ee0f6b6639d1\": rpc error: code = NotFound desc = could not find container \"d499056675eb10e2896a62c20fcd62d2b9630bcd95326b33de69ee0f6b6639d1\": container with ID starting with d499056675eb10e2896a62c20fcd62d2b9630bcd95326b33de69ee0f6b6639d1 not found: ID does not exist" Nov 25 21:55:41 crc kubenswrapper[4910]: I1125 21:55:41.217946 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" path="/var/lib/kubelet/pods/29be51cc-860e-42ab-8ab1-5ad31d2c34ba/volumes" Nov 25 21:55:44 crc kubenswrapper[4910]: I1125 21:55:44.035170 4910 generic.go:334] "Generic (PLEG): container finished" podID="6fbaf31f-bfe9-4f0a-a064-75d015480249" containerID="746988820a64cee990defcbd42892fbeace3fa4595e78a4b68fdde15bc6e4121" exitCode=0 Nov 25 21:55:44 crc kubenswrapper[4910]: I1125 21:55:44.035305 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" event={"ID":"6fbaf31f-bfe9-4f0a-a064-75d015480249","Type":"ContainerDied","Data":"746988820a64cee990defcbd42892fbeace3fa4595e78a4b68fdde15bc6e4121"} Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.589187 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.732723 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-bootstrap-combined-ca-bundle\") pod \"6fbaf31f-bfe9-4f0a-a064-75d015480249\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.732986 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-ssh-key\") pod \"6fbaf31f-bfe9-4f0a-a064-75d015480249\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.733179 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-inventory\") pod \"6fbaf31f-bfe9-4f0a-a064-75d015480249\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.733231 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4zvw\" (UniqueName: \"kubernetes.io/projected/6fbaf31f-bfe9-4f0a-a064-75d015480249-kube-api-access-q4zvw\") pod \"6fbaf31f-bfe9-4f0a-a064-75d015480249\" (UID: \"6fbaf31f-bfe9-4f0a-a064-75d015480249\") " Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.740686 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "6fbaf31f-bfe9-4f0a-a064-75d015480249" (UID: "6fbaf31f-bfe9-4f0a-a064-75d015480249"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.741862 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fbaf31f-bfe9-4f0a-a064-75d015480249-kube-api-access-q4zvw" (OuterVolumeSpecName: "kube-api-access-q4zvw") pod "6fbaf31f-bfe9-4f0a-a064-75d015480249" (UID: "6fbaf31f-bfe9-4f0a-a064-75d015480249"). InnerVolumeSpecName "kube-api-access-q4zvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.769678 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6fbaf31f-bfe9-4f0a-a064-75d015480249" (UID: "6fbaf31f-bfe9-4f0a-a064-75d015480249"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.792507 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-inventory" (OuterVolumeSpecName: "inventory") pod "6fbaf31f-bfe9-4f0a-a064-75d015480249" (UID: "6fbaf31f-bfe9-4f0a-a064-75d015480249"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.837841 4910 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.837908 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.837930 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6fbaf31f-bfe9-4f0a-a064-75d015480249-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 21:55:45 crc kubenswrapper[4910]: I1125 21:55:45.837951 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4zvw\" (UniqueName: \"kubernetes.io/projected/6fbaf31f-bfe9-4f0a-a064-75d015480249-kube-api-access-q4zvw\") on node \"crc\" DevicePath \"\"" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.073560 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" event={"ID":"6fbaf31f-bfe9-4f0a-a064-75d015480249","Type":"ContainerDied","Data":"3e13d6808af9fcf2ca06592293e92615ab4defe5b8da8623b641db97a98583c6"} Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.074418 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3e13d6808af9fcf2ca06592293e92615ab4defe5b8da8623b641db97a98583c6" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.073638 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.210385 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs"] Nov 25 21:55:46 crc kubenswrapper[4910]: E1125 21:55:46.211526 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" containerName="extract-content" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.211575 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" containerName="extract-content" Nov 25 21:55:46 crc kubenswrapper[4910]: E1125 21:55:46.211644 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" containerName="registry-server" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.211664 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" containerName="registry-server" Nov 25 21:55:46 crc kubenswrapper[4910]: E1125 21:55:46.211719 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" containerName="extract-utilities" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.211741 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" containerName="extract-utilities" Nov 25 21:55:46 crc kubenswrapper[4910]: E1125 21:55:46.211825 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fbaf31f-bfe9-4f0a-a064-75d015480249" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.211865 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fbaf31f-bfe9-4f0a-a064-75d015480249" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.212419 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fbaf31f-bfe9-4f0a-a064-75d015480249" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.212466 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="29be51cc-860e-42ab-8ab1-5ad31d2c34ba" containerName="registry-server" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.214123 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.218613 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.219083 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs"] Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.219221 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.219716 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.224653 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.351890 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-24hvs\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.352000 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9gstq\" (UniqueName: \"kubernetes.io/projected/a6f639c3-729d-4c6a-9e97-afb151569af5-kube-api-access-9gstq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-24hvs\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.352281 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-24hvs\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.454925 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-24hvs\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.455061 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9gstq\" (UniqueName: \"kubernetes.io/projected/a6f639c3-729d-4c6a-9e97-afb151569af5-kube-api-access-9gstq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-24hvs\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.455282 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-inventory\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-24hvs\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.459530 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-24hvs\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.464960 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-24hvs\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.480546 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9gstq\" (UniqueName: \"kubernetes.io/projected/a6f639c3-729d-4c6a-9e97-afb151569af5-kube-api-access-9gstq\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-24hvs\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" Nov 25 21:55:46 crc kubenswrapper[4910]: I1125 21:55:46.543911 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" Nov 25 21:55:47 crc kubenswrapper[4910]: I1125 21:55:47.142559 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs"] Nov 25 21:55:47 crc kubenswrapper[4910]: I1125 21:55:47.172118 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 21:55:48 crc kubenswrapper[4910]: I1125 21:55:48.095388 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" event={"ID":"a6f639c3-729d-4c6a-9e97-afb151569af5","Type":"ContainerStarted","Data":"91bf74b76db084dc0d30bd832e7728ae5d70a33d1cc538a75642b5b88fc99d8f"} Nov 25 21:55:49 crc kubenswrapper[4910]: I1125 21:55:49.113899 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" event={"ID":"a6f639c3-729d-4c6a-9e97-afb151569af5","Type":"ContainerStarted","Data":"e4a23da45ee047e3b6b457b6df214baca7f689b35562b1608345063e9bc78241"} Nov 25 21:55:49 crc kubenswrapper[4910]: I1125 21:55:49.135578 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" podStartSLOduration=2.386702439 podStartE2EDuration="3.135524037s" podCreationTimestamp="2025-11-25 21:55:46 +0000 UTC" firstStartedPulling="2025-11-25 21:55:47.171816232 +0000 UTC m=+1502.634292554" lastFinishedPulling="2025-11-25 21:55:47.92063782 +0000 UTC m=+1503.383114152" observedRunningTime="2025-11-25 21:55:49.13339611 +0000 UTC m=+1504.595872492" watchObservedRunningTime="2025-11-25 21:55:49.135524037 +0000 UTC m=+1504.598000399" Nov 25 21:55:50 crc kubenswrapper[4910]: I1125 21:55:50.205135 4910 scope.go:117] "RemoveContainer" 
containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:55:50 crc kubenswrapper[4910]: E1125 21:55:50.205629 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.533768 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lpspv"] Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.537917 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.548511 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpspv"] Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.573268 4910 scope.go:117] "RemoveContainer" containerID="9c9e6b721a2a063bee8d5e70902a93971d4989ad8b50482f6efc7989ae27c276" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.602021 4910 scope.go:117] "RemoveContainer" containerID="108d7aed134ed308c0e5b1b87dccf06757d09d8407c4f1aede35818f1f078140" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.602316 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-utilities\") pod \"redhat-marketplace-lpspv\" (UID: \"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.602387 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwg4b\" (UniqueName: \"kubernetes.io/projected/9489a8f8-812f-48a1-ad2e-ae74d672a349-kube-api-access-jwg4b\") pod \"redhat-marketplace-lpspv\" (UID: \"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.602460 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-catalog-content\") pod \"redhat-marketplace-lpspv\" (UID: \"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.627803 4910 scope.go:117] "RemoveContainer" containerID="7fdc1d6a7ae0b279f32b318f90f6ad0d8af6130f11e0bd2b5cb81924049d9dc2" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.653143 4910 scope.go:117] "RemoveContainer" containerID="63f726954dc834029c3701691afaafacd775f92d700ed27908c265c6addf97d6" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.682003 4910 scope.go:117] "RemoveContainer" containerID="b206e1ed75c1d3a21d9f511183b83546762b9eeb204eafa61e26fef50296c718" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.705231 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-utilities\") pod \"redhat-marketplace-lpspv\" (UID: 
\"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.705314 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwg4b\" (UniqueName: \"kubernetes.io/projected/9489a8f8-812f-48a1-ad2e-ae74d672a349-kube-api-access-jwg4b\") pod \"redhat-marketplace-lpspv\" (UID: \"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.705392 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-catalog-content\") pod \"redhat-marketplace-lpspv\" (UID: \"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.705752 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-utilities\") pod \"redhat-marketplace-lpspv\" (UID: \"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.706098 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-catalog-content\") pod \"redhat-marketplace-lpspv\" (UID: \"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.707583 4910 scope.go:117] "RemoveContainer" containerID="f2eff9ca2d690397a087e89412ac80f764261e485f8ce031b3229952101e11f9" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.724791 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwg4b\" (UniqueName: \"kubernetes.io/projected/9489a8f8-812f-48a1-ad2e-ae74d672a349-kube-api-access-jwg4b\") pod \"redhat-marketplace-lpspv\" (UID: \"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.735495 4910 scope.go:117] "RemoveContainer" containerID="d2f620d772660f1a41f891f22352d87871a684fc4c6c890f81c8dde9db321244" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.797550 4910 scope.go:117] "RemoveContainer" containerID="e81fdca7f0f754e4680f00d08defa429b51b16f99db2da0b49b542973eec3507" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.816486 4910 scope.go:117] "RemoveContainer" containerID="f2fbfa0a15dda9fc0c46bf6aa78b8ad7cdb8f6f3ed839307df135fb701a63c0f" Nov 25 21:55:52 crc kubenswrapper[4910]: I1125 21:55:52.874405 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:55:53 crc kubenswrapper[4910]: I1125 21:55:53.428909 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpspv"] Nov 25 21:55:54 crc kubenswrapper[4910]: I1125 21:55:54.162623 4910 generic.go:334] "Generic (PLEG): container finished" podID="9489a8f8-812f-48a1-ad2e-ae74d672a349" containerID="1c4694508214a9eeae84dbf1fef76bf631631415465023d583fc7763800870b6" exitCode=0 Nov 25 21:55:54 crc kubenswrapper[4910]: I1125 21:55:54.162674 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpspv" event={"ID":"9489a8f8-812f-48a1-ad2e-ae74d672a349","Type":"ContainerDied","Data":"1c4694508214a9eeae84dbf1fef76bf631631415465023d583fc7763800870b6"} Nov 25 21:55:54 crc kubenswrapper[4910]: I1125 21:55:54.162704 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpspv" event={"ID":"9489a8f8-812f-48a1-ad2e-ae74d672a349","Type":"ContainerStarted","Data":"5d2986682bea44bf123bd9a184e7a492f2dfaf01b3b8c34cd8ef20177ba66abe"} Nov 25 21:55:56 crc kubenswrapper[4910]: I1125 21:55:56.213064 4910 generic.go:334] "Generic (PLEG): container finished" podID="9489a8f8-812f-48a1-ad2e-ae74d672a349" containerID="c94345dcaca25a5ba031173f1772765bf176e3b09eb9f04b0cf5b7911ac36ea6" exitCode=0 Nov 25 21:55:56 crc kubenswrapper[4910]: I1125 21:55:56.213205 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpspv" event={"ID":"9489a8f8-812f-48a1-ad2e-ae74d672a349","Type":"ContainerDied","Data":"c94345dcaca25a5ba031173f1772765bf176e3b09eb9f04b0cf5b7911ac36ea6"} Nov 25 21:55:57 crc kubenswrapper[4910]: I1125 21:55:57.224701 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpspv" event={"ID":"9489a8f8-812f-48a1-ad2e-ae74d672a349","Type":"ContainerStarted","Data":"ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c"} Nov 25 21:55:57 crc kubenswrapper[4910]: I1125 21:55:57.252717 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lpspv" podStartSLOduration=2.780678161 podStartE2EDuration="5.252695346s" podCreationTimestamp="2025-11-25 21:55:52 +0000 UTC" firstStartedPulling="2025-11-25 21:55:54.164866223 +0000 UTC m=+1509.627342535" lastFinishedPulling="2025-11-25 21:55:56.636883398 +0000 UTC m=+1512.099359720" observedRunningTime="2025-11-25 21:55:57.249081679 +0000 UTC m=+1512.711558011" watchObservedRunningTime="2025-11-25 21:55:57.252695346 +0000 UTC m=+1512.715171668" Nov 25 21:56:02 crc kubenswrapper[4910]: I1125 21:56:02.875627 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:56:02 crc kubenswrapper[4910]: I1125 21:56:02.876086 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:56:02 crc kubenswrapper[4910]: I1125 21:56:02.928440 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:56:03 crc kubenswrapper[4910]: I1125 21:56:03.389726 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:56:03 crc kubenswrapper[4910]: I1125 21:56:03.452308 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-marketplace-lpspv"] Nov 25 21:56:05 crc kubenswrapper[4910]: I1125 21:56:05.218736 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:56:05 crc kubenswrapper[4910]: E1125 21:56:05.221689 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:56:05 crc kubenswrapper[4910]: I1125 21:56:05.347962 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-lpspv" podUID="9489a8f8-812f-48a1-ad2e-ae74d672a349" containerName="registry-server" containerID="cri-o://ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c" gracePeriod=2 Nov 25 21:56:05 crc kubenswrapper[4910]: I1125 21:56:05.833609 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpspv" Nov 25 21:56:05 crc kubenswrapper[4910]: I1125 21:56:05.920552 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwg4b\" (UniqueName: \"kubernetes.io/projected/9489a8f8-812f-48a1-ad2e-ae74d672a349-kube-api-access-jwg4b\") pod \"9489a8f8-812f-48a1-ad2e-ae74d672a349\" (UID: \"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " Nov 25 21:56:05 crc kubenswrapper[4910]: I1125 21:56:05.920665 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-catalog-content\") pod \"9489a8f8-812f-48a1-ad2e-ae74d672a349\" (UID: \"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " Nov 25 21:56:05 crc kubenswrapper[4910]: I1125 21:56:05.920735 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-utilities\") pod \"9489a8f8-812f-48a1-ad2e-ae74d672a349\" (UID: \"9489a8f8-812f-48a1-ad2e-ae74d672a349\") " Nov 25 21:56:05 crc kubenswrapper[4910]: I1125 21:56:05.922434 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-utilities" (OuterVolumeSpecName: "utilities") pod "9489a8f8-812f-48a1-ad2e-ae74d672a349" (UID: "9489a8f8-812f-48a1-ad2e-ae74d672a349"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:56:05 crc kubenswrapper[4910]: I1125 21:56:05.929690 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9489a8f8-812f-48a1-ad2e-ae74d672a349-kube-api-access-jwg4b" (OuterVolumeSpecName: "kube-api-access-jwg4b") pod "9489a8f8-812f-48a1-ad2e-ae74d672a349" (UID: "9489a8f8-812f-48a1-ad2e-ae74d672a349"). InnerVolumeSpecName "kube-api-access-jwg4b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:56:05 crc kubenswrapper[4910]: I1125 21:56:05.943604 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9489a8f8-812f-48a1-ad2e-ae74d672a349" (UID: "9489a8f8-812f-48a1-ad2e-ae74d672a349"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.024052 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwg4b\" (UniqueName: \"kubernetes.io/projected/9489a8f8-812f-48a1-ad2e-ae74d672a349-kube-api-access-jwg4b\") on node \"crc\" DevicePath \"\"" Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.024090 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.024103 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9489a8f8-812f-48a1-ad2e-ae74d672a349-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.361096 4910 generic.go:334] "Generic (PLEG): container finished" podID="9489a8f8-812f-48a1-ad2e-ae74d672a349" containerID="ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c" exitCode=0 Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.361145 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpspv" event={"ID":"9489a8f8-812f-48a1-ad2e-ae74d672a349","Type":"ContainerDied","Data":"ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c"} Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.361184 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lpspv" event={"ID":"9489a8f8-812f-48a1-ad2e-ae74d672a349","Type":"ContainerDied","Data":"5d2986682bea44bf123bd9a184e7a492f2dfaf01b3b8c34cd8ef20177ba66abe"} Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.361205 4910 scope.go:117] "RemoveContainer" containerID="ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c" Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.361214 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lpspv"
Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.408431 4910 scope.go:117] "RemoveContainer" containerID="c94345dcaca25a5ba031173f1772765bf176e3b09eb9f04b0cf5b7911ac36ea6"
Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.413805 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpspv"]
Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.423880 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-lpspv"]
Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.442228 4910 scope.go:117] "RemoveContainer" containerID="1c4694508214a9eeae84dbf1fef76bf631631415465023d583fc7763800870b6"
Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.507014 4910 scope.go:117] "RemoveContainer" containerID="ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c"
Nov 25 21:56:06 crc kubenswrapper[4910]: E1125 21:56:06.507764 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c\": container with ID starting with ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c not found: ID does not exist" containerID="ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c"
Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.507842 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c"} err="failed to get container status \"ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c\": rpc error: code = NotFound desc = could not find container \"ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c\": container with ID starting with ce6b6d0a9695f387d8ef0146e43b7116a40de43fdc4dcbddf8d087274e5c6c1c not found: ID does not exist"
Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.507883 4910 scope.go:117] "RemoveContainer" containerID="c94345dcaca25a5ba031173f1772765bf176e3b09eb9f04b0cf5b7911ac36ea6"
Nov 25 21:56:06 crc kubenswrapper[4910]: E1125 21:56:06.508395 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c94345dcaca25a5ba031173f1772765bf176e3b09eb9f04b0cf5b7911ac36ea6\": container with ID starting with c94345dcaca25a5ba031173f1772765bf176e3b09eb9f04b0cf5b7911ac36ea6 not found: ID does not exist" containerID="c94345dcaca25a5ba031173f1772765bf176e3b09eb9f04b0cf5b7911ac36ea6"
Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.508455 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c94345dcaca25a5ba031173f1772765bf176e3b09eb9f04b0cf5b7911ac36ea6"} err="failed to get container status \"c94345dcaca25a5ba031173f1772765bf176e3b09eb9f04b0cf5b7911ac36ea6\": rpc error: code = NotFound desc = could not find container \"c94345dcaca25a5ba031173f1772765bf176e3b09eb9f04b0cf5b7911ac36ea6\": container with ID starting with c94345dcaca25a5ba031173f1772765bf176e3b09eb9f04b0cf5b7911ac36ea6 not found: ID does not exist"
Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.508505 4910 scope.go:117] "RemoveContainer" containerID="1c4694508214a9eeae84dbf1fef76bf631631415465023d583fc7763800870b6"
Nov 25 21:56:06 crc kubenswrapper[4910]: E1125 21:56:06.508886 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c4694508214a9eeae84dbf1fef76bf631631415465023d583fc7763800870b6\": container with ID starting with 1c4694508214a9eeae84dbf1fef76bf631631415465023d583fc7763800870b6 not found: ID does not exist" containerID="1c4694508214a9eeae84dbf1fef76bf631631415465023d583fc7763800870b6"
Nov 25 21:56:06 crc kubenswrapper[4910]: I1125 21:56:06.508924 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c4694508214a9eeae84dbf1fef76bf631631415465023d583fc7763800870b6"} err="failed to get container status \"1c4694508214a9eeae84dbf1fef76bf631631415465023d583fc7763800870b6\": rpc error: code = NotFound desc = could not find container \"1c4694508214a9eeae84dbf1fef76bf631631415465023d583fc7763800870b6\": container with ID starting with 1c4694508214a9eeae84dbf1fef76bf631631415465023d583fc7763800870b6 not found: ID does not exist"
Nov 25 21:56:07 crc kubenswrapper[4910]: I1125 21:56:07.230152 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9489a8f8-812f-48a1-ad2e-ae74d672a349" path="/var/lib/kubelet/pods/9489a8f8-812f-48a1-ad2e-ae74d672a349/volumes"
Nov 25 21:56:20 crc kubenswrapper[4910]: I1125 21:56:20.204296 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"
Nov 25 21:56:20 crc kubenswrapper[4910]: E1125 21:56:20.205114 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.062824 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-bf9cj"]
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.083209 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-nvnbv"]
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.098952 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-fbfdb"]
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.110825 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-a73a-account-create-update-96g2p"]
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.119867 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-89b5-account-create-update-h744x"]
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.129535 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-21bb-account-create-update-7plzn"]
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.139128 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-bf9cj"]
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.147412 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-nvnbv"]
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.155737 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-89b5-account-create-update-h744x"]
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.164253 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-fbfdb"]
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.172052 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-a73a-account-create-update-96g2p"]
Nov 25 21:56:30 crc kubenswrapper[4910]: I1125 21:56:30.179551 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-21bb-account-create-update-7plzn"]
Nov 25 21:56:31 crc kubenswrapper[4910]: I1125 21:56:31.221669 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="060d8a13-8904-4f0b-967b-108554dec9e7" path="/var/lib/kubelet/pods/060d8a13-8904-4f0b-967b-108554dec9e7/volumes"
Nov 25 21:56:31 crc kubenswrapper[4910]: I1125 21:56:31.223153 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e32216a-8f9f-440a-9c2a-bba04eb3f0e3" path="/var/lib/kubelet/pods/6e32216a-8f9f-440a-9c2a-bba04eb3f0e3/volumes"
Nov 25 21:56:31 crc kubenswrapper[4910]: I1125 21:56:31.223864 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e02073f-e958-48a0-8bf6-8e9959924424" path="/var/lib/kubelet/pods/8e02073f-e958-48a0-8bf6-8e9959924424/volumes"
Nov 25 21:56:31 crc kubenswrapper[4910]: I1125 21:56:31.224525 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="afe4e40b-44ed-477b-a1d8-b9b973042e11" path="/var/lib/kubelet/pods/afe4e40b-44ed-477b-a1d8-b9b973042e11/volumes"
Nov 25 21:56:31 crc kubenswrapper[4910]: I1125 21:56:31.225926 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d01ae6d0-c74e-49fc-9681-0d6738b0f92b" path="/var/lib/kubelet/pods/d01ae6d0-c74e-49fc-9681-0d6738b0f92b/volumes"
Nov 25 21:56:31 crc kubenswrapper[4910]: I1125 21:56:31.226660 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7dabc19-8625-4a01-9ad5-f0370ec7d608" path="/var/lib/kubelet/pods/e7dabc19-8625-4a01-9ad5-f0370ec7d608/volumes"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.009803 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vrsmc"]
Nov 25 21:56:33 crc kubenswrapper[4910]: E1125 21:56:33.010216 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9489a8f8-812f-48a1-ad2e-ae74d672a349" containerName="extract-utilities"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.010229 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9489a8f8-812f-48a1-ad2e-ae74d672a349" containerName="extract-utilities"
Nov 25 21:56:33 crc kubenswrapper[4910]: E1125 21:56:33.010272 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9489a8f8-812f-48a1-ad2e-ae74d672a349" containerName="extract-content"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.010280 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9489a8f8-812f-48a1-ad2e-ae74d672a349" containerName="extract-content"
Nov 25 21:56:33 crc kubenswrapper[4910]: E1125 21:56:33.010291 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9489a8f8-812f-48a1-ad2e-ae74d672a349" containerName="registry-server"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.010298 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9489a8f8-812f-48a1-ad2e-ae74d672a349" containerName="registry-server"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.010468 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9489a8f8-812f-48a1-ad2e-ae74d672a349" containerName="registry-server"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.011869 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.023131 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vrsmc"]
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.061034 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-utilities\") pod \"redhat-operators-vrsmc\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") " pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.061107 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hk99\" (UniqueName: \"kubernetes.io/projected/a30d595f-821c-4876-970d-1528b04ef96d-kube-api-access-5hk99\") pod \"redhat-operators-vrsmc\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") " pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.061236 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-catalog-content\") pod \"redhat-operators-vrsmc\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") " pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.163286 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hk99\" (UniqueName: \"kubernetes.io/projected/a30d595f-821c-4876-970d-1528b04ef96d-kube-api-access-5hk99\") pod \"redhat-operators-vrsmc\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") " pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.163469 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-catalog-content\") pod \"redhat-operators-vrsmc\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") " pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.163526 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-utilities\") pod \"redhat-operators-vrsmc\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") " pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.164145 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-catalog-content\") pod \"redhat-operators-vrsmc\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") " pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.164218 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-utilities\") pod \"redhat-operators-vrsmc\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") " pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.191295 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hk99\" (UniqueName: \"kubernetes.io/projected/a30d595f-821c-4876-970d-1528b04ef96d-kube-api-access-5hk99\") pod \"redhat-operators-vrsmc\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") " pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.341729 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:33 crc kubenswrapper[4910]: I1125 21:56:33.857280 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vrsmc"]
Nov 25 21:56:34 crc kubenswrapper[4910]: I1125 21:56:34.204629 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"
Nov 25 21:56:34 crc kubenswrapper[4910]: E1125 21:56:34.204895 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 21:56:34 crc kubenswrapper[4910]: I1125 21:56:34.692875 4910 generic.go:334] "Generic (PLEG): container finished" podID="a30d595f-821c-4876-970d-1528b04ef96d" containerID="eb746d5c31271b1f0bd0b5b8ef2a7a2ae2fde8af9d44efde1c5dd737371b6696" exitCode=0
Nov 25 21:56:34 crc kubenswrapper[4910]: I1125 21:56:34.692922 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrsmc" event={"ID":"a30d595f-821c-4876-970d-1528b04ef96d","Type":"ContainerDied","Data":"eb746d5c31271b1f0bd0b5b8ef2a7a2ae2fde8af9d44efde1c5dd737371b6696"}
Nov 25 21:56:34 crc kubenswrapper[4910]: I1125 21:56:34.692961 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrsmc" event={"ID":"a30d595f-821c-4876-970d-1528b04ef96d","Type":"ContainerStarted","Data":"94471c451066f1d021ff13ee75981b8ca8d96e0c974bea03d64f71d40b1f40e7"}
Nov 25 21:56:36 crc kubenswrapper[4910]: I1125 21:56:36.721977 4910 generic.go:334] "Generic (PLEG): container finished" podID="a30d595f-821c-4876-970d-1528b04ef96d" containerID="74ce37d19d859124045aa998768b527c939200eabc81c0a01eec0d30e24ddbf1" exitCode=0
Nov 25 21:56:36 crc kubenswrapper[4910]: I1125 21:56:36.722146 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrsmc" event={"ID":"a30d595f-821c-4876-970d-1528b04ef96d","Type":"ContainerDied","Data":"74ce37d19d859124045aa998768b527c939200eabc81c0a01eec0d30e24ddbf1"}
Nov 25 21:56:37 crc kubenswrapper[4910]: I1125 21:56:37.737112 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrsmc" event={"ID":"a30d595f-821c-4876-970d-1528b04ef96d","Type":"ContainerStarted","Data":"d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026"}
Nov 25 21:56:37 crc kubenswrapper[4910]: I1125 21:56:37.761990 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vrsmc" podStartSLOduration=3.269544054 podStartE2EDuration="5.761970086s" podCreationTimestamp="2025-11-25 21:56:32 +0000 UTC" firstStartedPulling="2025-11-25 21:56:34.696583794 +0000 UTC m=+1550.159060106" lastFinishedPulling="2025-11-25 21:56:37.189009816 +0000 UTC m=+1552.651486138" observedRunningTime="2025-11-25 21:56:37.759824879 +0000 UTC m=+1553.222301211" watchObservedRunningTime="2025-11-25 21:56:37.761970086 +0000 UTC m=+1553.224446418"
Nov 25 21:56:43 crc kubenswrapper[4910]: I1125 21:56:43.342450 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:43 crc kubenswrapper[4910]: I1125 21:56:43.343028 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:44 crc kubenswrapper[4910]: I1125 21:56:44.402646 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-vrsmc" podUID="a30d595f-821c-4876-970d-1528b04ef96d" containerName="registry-server" probeResult="failure" output=<
Nov 25 21:56:44 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s
Nov 25 21:56:44 crc kubenswrapper[4910]: >
Nov 25 21:56:47 crc kubenswrapper[4910]: I1125 21:56:47.204512 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"
Nov 25 21:56:47 crc kubenswrapper[4910]: E1125 21:56:47.205474 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 21:56:53 crc kubenswrapper[4910]: I1125 21:56:53.007445 4910 scope.go:117] "RemoveContainer" containerID="73145759e761c29ad7989016538415ec0e9ec4cb7b2b50ab24c7b4654ede52e7"
Nov 25 21:56:53 crc kubenswrapper[4910]: I1125 21:56:53.059517 4910 scope.go:117] "RemoveContainer" containerID="5421ffe0e09ed20becc521690783484de0df47c749ae407a847e9e196592b824"
Nov 25 21:56:53 crc kubenswrapper[4910]: I1125 21:56:53.103443 4910 scope.go:117] "RemoveContainer" containerID="cc9423bd17f7832ce59a7b28a0d1f93b64b559fe8bcada446624da78cba695a2"
Nov 25 21:56:53 crc kubenswrapper[4910]: I1125 21:56:53.150254 4910 scope.go:117] "RemoveContainer" containerID="cd16da346a97bc1763b4cc7a7a59cd128690a18d42b7519b103b301f7023350a"
Nov 25 21:56:53 crc kubenswrapper[4910]: I1125 21:56:53.203584 4910 scope.go:117] "RemoveContainer" containerID="c2d23beb2a8777214bfa1bf6da8f94e3a83fd1fc95e0f3478400c006ae36c99d"
Nov 25 21:56:53 crc kubenswrapper[4910]: I1125 21:56:53.287251 4910 scope.go:117] "RemoveContainer" containerID="a9da1698719a8f65a192e7da50cf4b8a2bd0117f9e5bddaf32d284a0150b925a"
Nov 25 21:56:53 crc kubenswrapper[4910]: I1125 21:56:53.314485 4910 scope.go:117] "RemoveContainer" containerID="bea635cf438c7b7d19a6264b0b6718311338cf2765128c5a59caf7e363224bad"
Nov 25 21:56:53 crc kubenswrapper[4910]: I1125 21:56:53.368845 4910 scope.go:117] "RemoveContainer" containerID="6f3b9e563c55778a3e4df96b6b499c37bb8d1813e13545b12f847ce499f4bf72"
Nov 25 21:56:53 crc kubenswrapper[4910]: I1125 21:56:53.418209 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:53 crc kubenswrapper[4910]: I1125 21:56:53.475669 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:53 crc kubenswrapper[4910]: I1125 21:56:53.669498 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vrsmc"]
Nov 25 21:56:54 crc kubenswrapper[4910]: I1125 21:56:54.947811 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vrsmc" podUID="a30d595f-821c-4876-970d-1528b04ef96d" containerName="registry-server" containerID="cri-o://d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026" gracePeriod=2
Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.577142 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vrsmc"
Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.706355 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-utilities\") pod \"a30d595f-821c-4876-970d-1528b04ef96d\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") "
Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.706596 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-catalog-content\") pod \"a30d595f-821c-4876-970d-1528b04ef96d\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") "
Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.706663 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hk99\" (UniqueName: \"kubernetes.io/projected/a30d595f-821c-4876-970d-1528b04ef96d-kube-api-access-5hk99\") pod \"a30d595f-821c-4876-970d-1528b04ef96d\" (UID: \"a30d595f-821c-4876-970d-1528b04ef96d\") "
Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.708014 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-utilities" (OuterVolumeSpecName: "utilities") pod "a30d595f-821c-4876-970d-1528b04ef96d" (UID: "a30d595f-821c-4876-970d-1528b04ef96d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.718037 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a30d595f-821c-4876-970d-1528b04ef96d-kube-api-access-5hk99" (OuterVolumeSpecName: "kube-api-access-5hk99") pod "a30d595f-821c-4876-970d-1528b04ef96d" (UID: "a30d595f-821c-4876-970d-1528b04ef96d"). InnerVolumeSpecName "kube-api-access-5hk99". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.808960 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.809002 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hk99\" (UniqueName: \"kubernetes.io/projected/a30d595f-821c-4876-970d-1528b04ef96d-kube-api-access-5hk99\") on node \"crc\" DevicePath \"\""
Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.812592 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a30d595f-821c-4876-970d-1528b04ef96d" (UID: "a30d595f-821c-4876-970d-1528b04ef96d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.911745 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a30d595f-821c-4876-970d-1528b04ef96d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.968090 4910 generic.go:334] "Generic (PLEG): container finished" podID="a30d595f-821c-4876-970d-1528b04ef96d" containerID="d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026" exitCode=0 Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.968140 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrsmc" event={"ID":"a30d595f-821c-4876-970d-1528b04ef96d","Type":"ContainerDied","Data":"d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026"} Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.968178 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vrsmc" event={"ID":"a30d595f-821c-4876-970d-1528b04ef96d","Type":"ContainerDied","Data":"94471c451066f1d021ff13ee75981b8ca8d96e0c974bea03d64f71d40b1f40e7"} Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.968198 4910 scope.go:117] "RemoveContainer" containerID="d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026" Nov 25 21:56:55 crc kubenswrapper[4910]: I1125 21:56:55.968259 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vrsmc" Nov 25 21:56:56 crc kubenswrapper[4910]: I1125 21:56:56.008639 4910 scope.go:117] "RemoveContainer" containerID="74ce37d19d859124045aa998768b527c939200eabc81c0a01eec0d30e24ddbf1" Nov 25 21:56:56 crc kubenswrapper[4910]: I1125 21:56:56.025156 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vrsmc"] Nov 25 21:56:56 crc kubenswrapper[4910]: I1125 21:56:56.039097 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vrsmc"] Nov 25 21:56:56 crc kubenswrapper[4910]: I1125 21:56:56.046673 4910 scope.go:117] "RemoveContainer" containerID="eb746d5c31271b1f0bd0b5b8ef2a7a2ae2fde8af9d44efde1c5dd737371b6696" Nov 25 21:56:56 crc kubenswrapper[4910]: I1125 21:56:56.086713 4910 scope.go:117] "RemoveContainer" containerID="d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026" Nov 25 21:56:56 crc kubenswrapper[4910]: E1125 21:56:56.087700 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026\": container with ID starting with d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026 not found: ID does not exist" containerID="d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026" Nov 25 21:56:56 crc kubenswrapper[4910]: I1125 21:56:56.087736 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026"} err="failed to get container status \"d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026\": rpc error: code = NotFound desc = could not find container \"d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026\": container with ID starting with d896089824329033ae82dd48f9fd71dd4b3e05d94c5280377807924a50c85026 not found: ID does not exist" Nov 25 21:56:56 crc 
kubenswrapper[4910]: I1125 21:56:56.087759 4910 scope.go:117] "RemoveContainer" containerID="74ce37d19d859124045aa998768b527c939200eabc81c0a01eec0d30e24ddbf1" Nov 25 21:56:56 crc kubenswrapper[4910]: E1125 21:56:56.088210 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74ce37d19d859124045aa998768b527c939200eabc81c0a01eec0d30e24ddbf1\": container with ID starting with 74ce37d19d859124045aa998768b527c939200eabc81c0a01eec0d30e24ddbf1 not found: ID does not exist" containerID="74ce37d19d859124045aa998768b527c939200eabc81c0a01eec0d30e24ddbf1" Nov 25 21:56:56 crc kubenswrapper[4910]: I1125 21:56:56.088316 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74ce37d19d859124045aa998768b527c939200eabc81c0a01eec0d30e24ddbf1"} err="failed to get container status \"74ce37d19d859124045aa998768b527c939200eabc81c0a01eec0d30e24ddbf1\": rpc error: code = NotFound desc = could not find container \"74ce37d19d859124045aa998768b527c939200eabc81c0a01eec0d30e24ddbf1\": container with ID starting with 74ce37d19d859124045aa998768b527c939200eabc81c0a01eec0d30e24ddbf1 not found: ID does not exist" Nov 25 21:56:56 crc kubenswrapper[4910]: I1125 21:56:56.088359 4910 scope.go:117] "RemoveContainer" containerID="eb746d5c31271b1f0bd0b5b8ef2a7a2ae2fde8af9d44efde1c5dd737371b6696" Nov 25 21:56:56 crc kubenswrapper[4910]: E1125 21:56:56.088865 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb746d5c31271b1f0bd0b5b8ef2a7a2ae2fde8af9d44efde1c5dd737371b6696\": container with ID starting with eb746d5c31271b1f0bd0b5b8ef2a7a2ae2fde8af9d44efde1c5dd737371b6696 not found: ID does not exist" containerID="eb746d5c31271b1f0bd0b5b8ef2a7a2ae2fde8af9d44efde1c5dd737371b6696" Nov 25 21:56:56 crc kubenswrapper[4910]: I1125 21:56:56.089074 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb746d5c31271b1f0bd0b5b8ef2a7a2ae2fde8af9d44efde1c5dd737371b6696"} err="failed to get container status \"eb746d5c31271b1f0bd0b5b8ef2a7a2ae2fde8af9d44efde1c5dd737371b6696\": rpc error: code = NotFound desc = could not find container \"eb746d5c31271b1f0bd0b5b8ef2a7a2ae2fde8af9d44efde1c5dd737371b6696\": container with ID starting with eb746d5c31271b1f0bd0b5b8ef2a7a2ae2fde8af9d44efde1c5dd737371b6696 not found: ID does not exist" Nov 25 21:56:57 crc kubenswrapper[4910]: I1125 21:56:57.227486 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a30d595f-821c-4876-970d-1528b04ef96d" path="/var/lib/kubelet/pods/a30d595f-821c-4876-970d-1528b04ef96d/volumes" Nov 25 21:57:01 crc kubenswrapper[4910]: I1125 21:57:01.062226 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-r42pw"] Nov 25 21:57:01 crc kubenswrapper[4910]: I1125 21:57:01.078341 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-r42pw"] Nov 25 21:57:01 crc kubenswrapper[4910]: I1125 21:57:01.220991 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5" path="/var/lib/kubelet/pods/6ee1badb-9c33-4b86-b2c2-cd96e6c30bc5/volumes" Nov 25 21:57:02 crc kubenswrapper[4910]: I1125 21:57:02.205618 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:57:02 crc kubenswrapper[4910]: E1125 21:57:02.206084 4910 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.051316 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-24c5-account-create-update-nzg6c"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.063677 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-v2wl2"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.075167 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-7fe9-account-create-update-gsm7z"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.084465 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-v2wl2"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.092626 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-jk49q"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.100487 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-4388-account-create-update-5w896"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.108708 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-24c5-account-create-update-nzg6c"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.126735 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-7fe9-account-create-update-gsm7z"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.144457 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-jk49q"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.158661 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-4388-account-create-update-5w896"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.170267 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-krdcz"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.180662 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-krdcz"] Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.231748 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3da4b4c4-0881-45a6-b144-c65dd3c93740" path="/var/lib/kubelet/pods/3da4b4c4-0881-45a6-b144-c65dd3c93740/volumes" Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.236115 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="51765f02-63d6-48ec-a2d9-6b12e042c76a" path="/var/lib/kubelet/pods/51765f02-63d6-48ec-a2d9-6b12e042c76a/volumes" Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.238461 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69a77984-0bf7-456c-97cc-f2db984fa1f6" path="/var/lib/kubelet/pods/69a77984-0bf7-456c-97cc-f2db984fa1f6/volumes" Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.239299 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8eda8c66-7cc6-4516-9f62-d2e54ba7345e" path="/var/lib/kubelet/pods/8eda8c66-7cc6-4516-9f62-d2e54ba7345e/volumes" Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.240058 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="ac08d9d1-b92d-4e30-ae39-02e1d31b7545" path="/var/lib/kubelet/pods/ac08d9d1-b92d-4e30-ae39-02e1d31b7545/volumes" Nov 25 21:57:07 crc kubenswrapper[4910]: I1125 21:57:07.241516 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfe62699-06b4-4275-a302-15969eef2435" path="/var/lib/kubelet/pods/cfe62699-06b4-4275-a302-15969eef2435/volumes" Nov 25 21:57:15 crc kubenswrapper[4910]: I1125 21:57:15.086469 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-sq4pt"] Nov 25 21:57:15 crc kubenswrapper[4910]: I1125 21:57:15.105557 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-sq4pt"] Nov 25 21:57:15 crc kubenswrapper[4910]: I1125 21:57:15.219292 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:57:15 crc kubenswrapper[4910]: E1125 21:57:15.220014 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:57:15 crc kubenswrapper[4910]: I1125 21:57:15.226807 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37737937-a670-4168-afca-ff5157233184" path="/var/lib/kubelet/pods/37737937-a670-4168-afca-ff5157233184/volumes" Nov 25 21:57:26 crc kubenswrapper[4910]: I1125 21:57:26.205129 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:57:26 crc kubenswrapper[4910]: E1125 21:57:26.206073 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:57:26 crc kubenswrapper[4910]: I1125 21:57:26.389931 4910 generic.go:334] "Generic (PLEG): container finished" podID="a6f639c3-729d-4c6a-9e97-afb151569af5" containerID="e4a23da45ee047e3b6b457b6df214baca7f689b35562b1608345063e9bc78241" exitCode=0 Nov 25 21:57:26 crc kubenswrapper[4910]: I1125 21:57:26.390003 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" event={"ID":"a6f639c3-729d-4c6a-9e97-afb151569af5","Type":"ContainerDied","Data":"e4a23da45ee047e3b6b457b6df214baca7f689b35562b1608345063e9bc78241"} Nov 25 21:57:27 crc kubenswrapper[4910]: I1125 21:57:27.945208 4910 util.go:48] "No ready sandbox for pod can be found. 
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:27.999987 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-ssh-key\") pod \"a6f639c3-729d-4c6a-9e97-afb151569af5\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") "
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.000111 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-inventory\") pod \"a6f639c3-729d-4c6a-9e97-afb151569af5\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") "
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.000640 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9gstq\" (UniqueName: \"kubernetes.io/projected/a6f639c3-729d-4c6a-9e97-afb151569af5-kube-api-access-9gstq\") pod \"a6f639c3-729d-4c6a-9e97-afb151569af5\" (UID: \"a6f639c3-729d-4c6a-9e97-afb151569af5\") "
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.007463 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6f639c3-729d-4c6a-9e97-afb151569af5-kube-api-access-9gstq" (OuterVolumeSpecName: "kube-api-access-9gstq") pod "a6f639c3-729d-4c6a-9e97-afb151569af5" (UID: "a6f639c3-729d-4c6a-9e97-afb151569af5"). InnerVolumeSpecName "kube-api-access-9gstq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.037089 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a6f639c3-729d-4c6a-9e97-afb151569af5" (UID: "a6f639c3-729d-4c6a-9e97-afb151569af5"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.047437 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-inventory" (OuterVolumeSpecName: "inventory") pod "a6f639c3-729d-4c6a-9e97-afb151569af5" (UID: "a6f639c3-729d-4c6a-9e97-afb151569af5"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.104753 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.104791 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a6f639c3-729d-4c6a-9e97-afb151569af5-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.104805 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9gstq\" (UniqueName: \"kubernetes.io/projected/a6f639c3-729d-4c6a-9e97-afb151569af5-kube-api-access-9gstq\") on node \"crc\" DevicePath \"\""
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.425747 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs" event={"ID":"a6f639c3-729d-4c6a-9e97-afb151569af5","Type":"ContainerDied","Data":"91bf74b76db084dc0d30bd832e7728ae5d70a33d1cc538a75642b5b88fc99d8f"}
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.425806 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91bf74b76db084dc0d30bd832e7728ae5d70a33d1cc538a75642b5b88fc99d8f"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.425881 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-24hvs"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.545776 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"]
Nov 25 21:57:28 crc kubenswrapper[4910]: E1125 21:57:28.546515 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6f639c3-729d-4c6a-9e97-afb151569af5" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.546545 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6f639c3-729d-4c6a-9e97-afb151569af5" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 25 21:57:28 crc kubenswrapper[4910]: E1125 21:57:28.546567 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a30d595f-821c-4876-970d-1528b04ef96d" containerName="extract-utilities"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.546577 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a30d595f-821c-4876-970d-1528b04ef96d" containerName="extract-utilities"
Nov 25 21:57:28 crc kubenswrapper[4910]: E1125 21:57:28.546624 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a30d595f-821c-4876-970d-1528b04ef96d" containerName="extract-content"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.546633 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a30d595f-821c-4876-970d-1528b04ef96d" containerName="extract-content"
Nov 25 21:57:28 crc kubenswrapper[4910]: E1125 21:57:28.546649 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a30d595f-821c-4876-970d-1528b04ef96d" containerName="registry-server"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.546657 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a30d595f-821c-4876-970d-1528b04ef96d" containerName="registry-server"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.546930 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a30d595f-821c-4876-970d-1528b04ef96d" containerName="registry-server"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.546967 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6f639c3-729d-4c6a-9e97-afb151569af5" containerName="download-cache-edpm-deployment-openstack-edpm-ipam"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.548031 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.550714 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.551444 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.551676 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.551869 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.555732 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"]
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.615562 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5h9z\" (UniqueName: \"kubernetes.io/projected/dc112a56-de9c-47b3-8ca9-c0225469f85c-kube-api-access-x5h9z\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.615660 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.615729 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.717312 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.717456 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.717553 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5h9z\" (UniqueName: \"kubernetes.io/projected/dc112a56-de9c-47b3-8ca9-c0225469f85c-kube-api-access-x5h9z\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.721871 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.725054 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.755839 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5h9z\" (UniqueName: \"kubernetes.io/projected/dc112a56-de9c-47b3-8ca9-c0225469f85c-kube-api-access-x5h9z\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:57:28 crc kubenswrapper[4910]: I1125 21:57:28.879451 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:57:29 crc kubenswrapper[4910]: I1125 21:57:29.578850 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"]
Nov 25 21:57:30 crc kubenswrapper[4910]: I1125 21:57:30.468000 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz" event={"ID":"dc112a56-de9c-47b3-8ca9-c0225469f85c","Type":"ContainerStarted","Data":"c61e3ccf2af706b1843266dab365d4b2afd27863b351a75e909cfffa8f08bca6"}
Nov 25 21:57:30 crc kubenswrapper[4910]: I1125 21:57:30.468518 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz" event={"ID":"dc112a56-de9c-47b3-8ca9-c0225469f85c","Type":"ContainerStarted","Data":"34051707b510eda7d430cf5a76cac3ee7de9522d44aaaf3b2ed565e3139df27a"}
Nov 25 21:57:30 crc kubenswrapper[4910]: I1125 21:57:30.490469 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz" podStartSLOduration=1.905972791 podStartE2EDuration="2.490443489s" podCreationTimestamp="2025-11-25 21:57:28 +0000 UTC" firstStartedPulling="2025-11-25 21:57:29.598813377 +0000 UTC m=+1605.061289719" lastFinishedPulling="2025-11-25 21:57:30.183284095 +0000 UTC m=+1605.645760417" observedRunningTime="2025-11-25 21:57:30.486127894 +0000 UTC m=+1605.948604246" watchObservedRunningTime="2025-11-25 21:57:30.490443489 +0000 UTC m=+1605.952919851"
Nov 25 21:57:38 crc kubenswrapper[4910]: I1125 21:57:38.204668 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"
Nov 25 21:57:38 crc kubenswrapper[4910]: E1125 21:57:38.206361 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 21:57:46 crc kubenswrapper[4910]: I1125 21:57:46.067208 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-dvs77"]
Nov 25 21:57:46 crc kubenswrapper[4910]: I1125 21:57:46.080616 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-dvs77"]
Nov 25 21:57:47 crc kubenswrapper[4910]: I1125 21:57:47.217207 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19f21ae5-3e49-410e-a481-00e837d94c6c" path="/var/lib/kubelet/pods/19f21ae5-3e49-410e-a481-00e837d94c6c/volumes"
Nov 25 21:57:50 crc kubenswrapper[4910]: I1125 21:57:50.204499 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"
Nov 25 21:57:50 crc kubenswrapper[4910]: E1125 21:57:50.205495 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 21:57:53 crc kubenswrapper[4910]: I1125 21:57:53.641915 4910 scope.go:117] "RemoveContainer" containerID="978b6581d3d12beb736377058bc6bc5cf10cc01a5f6690b2782b3dac3e96f341"
Nov 25 21:57:53 crc kubenswrapper[4910]: I1125 21:57:53.698657 4910 scope.go:117] "RemoveContainer" containerID="7309a460941907cb78a18937c8ef9484ddc0d443e13f7732170e4dd471d50805"
Nov 25 21:57:53 crc kubenswrapper[4910]: I1125 21:57:53.759452 4910 scope.go:117] "RemoveContainer" containerID="7714ec33692b75cb39718e60df64e30bbed48f36216d945b74baf39db7e0d990"
Nov 25 21:57:53 crc kubenswrapper[4910]: I1125 21:57:53.810924 4910 scope.go:117] "RemoveContainer" containerID="f0e7f65e6221131bc0c882d017ba379345fdf9bb179ce7b58e924d329b1dbb62"
Nov 25 21:57:53 crc kubenswrapper[4910]: I1125 21:57:53.904852 4910 scope.go:117] "RemoveContainer" containerID="e2d911f256192b349ab826bb2617841035c65fd6fc2dd122b5d10cdb5ea5ef73"
Nov 25 21:57:53 crc kubenswrapper[4910]: I1125 21:57:53.951453 4910 scope.go:117] "RemoveContainer" containerID="6827e820a8a940d03b264cc4a01017c07c40ada5bdf8281ce277b5fb7c13684a"
Nov 25 21:57:53 crc kubenswrapper[4910]: I1125 21:57:53.983875 4910 scope.go:117] "RemoveContainer" containerID="06cd7fd5468cb58b313eeedcfe1ca3e0e24865bcdd66699699dfdfebedc61723"
Nov 25 21:57:54 crc kubenswrapper[4910]: I1125 21:57:54.010883 4910 scope.go:117] "RemoveContainer" containerID="9bc0b65a04786da440d32b6d8ce6042ebc0f1304b0bf1a4c728c579c5922eb82"
Nov 25 21:57:54 crc kubenswrapper[4910]: I1125 21:57:54.038617 4910 scope.go:117] "RemoveContainer" containerID="6bc31831bf45f15d586f1ccc78f2279150243f53b01bc0680e3e0fe25b0f8dd2"
Nov 25 21:57:56 crc kubenswrapper[4910]: I1125 21:57:56.081201 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-hzbxg"]
Nov 25 21:57:56 crc kubenswrapper[4910]: I1125 21:57:56.096093 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-hzbxg"]
Nov 25 21:57:57 crc kubenswrapper[4910]: I1125 21:57:57.219417 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4" path="/var/lib/kubelet/pods/3e5f12ad-5f8b-4e8e-854c-ecc1e6fd46c4/volumes"
Nov 25 21:58:01 crc kubenswrapper[4910]: I1125 21:58:01.204887 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"
Nov 25 21:58:01 crc kubenswrapper[4910]: E1125 21:58:01.205863 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 21:58:07 crc kubenswrapper[4910]: I1125 21:58:07.043744 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-djprh"]
Nov 25 21:58:07 crc kubenswrapper[4910]: I1125 21:58:07.059216 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-t82b5"]
Nov 25 21:58:07 crc kubenswrapper[4910]: I1125 21:58:07.073281 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-t82b5"]
Nov 25 21:58:07 crc kubenswrapper[4910]: I1125 21:58:07.083477 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-djprh"]
Nov 25 21:58:07 crc kubenswrapper[4910]: I1125 21:58:07.219019 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e650049-c8bd-4a60-a1f7-1b022752ff7a" path="/var/lib/kubelet/pods/7e650049-c8bd-4a60-a1f7-1b022752ff7a/volumes"
Nov 25 21:58:07 crc kubenswrapper[4910]: I1125 21:58:07.221425 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="909bc667-1a51-44ef-b676-dabab2050b4e" path="/var/lib/kubelet/pods/909bc667-1a51-44ef-b676-dabab2050b4e/volumes"
Nov 25 21:58:12 crc kubenswrapper[4910]: I1125 21:58:12.052931 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-xx5nj"]
Nov 25 21:58:12 crc kubenswrapper[4910]: I1125 21:58:12.068519 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-xx5nj"]
Nov 25 21:58:13 crc kubenswrapper[4910]: I1125 21:58:13.206368 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"
Nov 25 21:58:13 crc kubenswrapper[4910]: E1125 21:58:13.207316 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 21:58:13 crc kubenswrapper[4910]: I1125 21:58:13.220551 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4" path="/var/lib/kubelet/pods/d7c1bc8b-7026-4f6a-8a1b-fee1aa94f0a4/volumes"
Nov 25 21:58:26 crc kubenswrapper[4910]: I1125 21:58:26.204088 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"
Nov 25 21:58:26 crc kubenswrapper[4910]: E1125 21:58:26.205104 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 21:58:39 crc kubenswrapper[4910]: I1125 21:58:39.204426 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a"
Nov 25 21:58:39 crc kubenswrapper[4910]: E1125 21:58:39.205599 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 21:58:42 crc kubenswrapper[4910]: I1125 21:58:42.398030 4910 generic.go:334] "Generic (PLEG): container finished" podID="dc112a56-de9c-47b3-8ca9-c0225469f85c" containerID="c61e3ccf2af706b1843266dab365d4b2afd27863b351a75e909cfffa8f08bca6" exitCode=0
Nov 25 21:58:42 crc kubenswrapper[4910]: I1125 21:58:42.398129 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz" event={"ID":"dc112a56-de9c-47b3-8ca9-c0225469f85c","Type":"ContainerDied","Data":"c61e3ccf2af706b1843266dab365d4b2afd27863b351a75e909cfffa8f08bca6"}
Nov 25 21:58:43 crc kubenswrapper[4910]: I1125 21:58:43.951595 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz"
Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.034098 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-inventory\") pod \"dc112a56-de9c-47b3-8ca9-c0225469f85c\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") "
Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.034314 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-ssh-key\") pod \"dc112a56-de9c-47b3-8ca9-c0225469f85c\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") "
Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.034481 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5h9z\" (UniqueName: \"kubernetes.io/projected/dc112a56-de9c-47b3-8ca9-c0225469f85c-kube-api-access-x5h9z\") pod \"dc112a56-de9c-47b3-8ca9-c0225469f85c\" (UID: \"dc112a56-de9c-47b3-8ca9-c0225469f85c\") "
Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.042629 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc112a56-de9c-47b3-8ca9-c0225469f85c-kube-api-access-x5h9z" (OuterVolumeSpecName: "kube-api-access-x5h9z") pod "dc112a56-de9c-47b3-8ca9-c0225469f85c" (UID: "dc112a56-de9c-47b3-8ca9-c0225469f85c"). InnerVolumeSpecName "kube-api-access-x5h9z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.073876 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-inventory" (OuterVolumeSpecName: "inventory") pod "dc112a56-de9c-47b3-8ca9-c0225469f85c" (UID: "dc112a56-de9c-47b3-8ca9-c0225469f85c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.087355 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "dc112a56-de9c-47b3-8ca9-c0225469f85c" (UID: "dc112a56-de9c-47b3-8ca9-c0225469f85c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.137715 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.137759 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/dc112a56-de9c-47b3-8ca9-c0225469f85c-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.137776 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5h9z\" (UniqueName: \"kubernetes.io/projected/dc112a56-de9c-47b3-8ca9-c0225469f85c-kube-api-access-x5h9z\") on node \"crc\" DevicePath \"\"" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.422633 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz" event={"ID":"dc112a56-de9c-47b3-8ca9-c0225469f85c","Type":"ContainerDied","Data":"34051707b510eda7d430cf5a76cac3ee7de9522d44aaaf3b2ed565e3139df27a"} Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.422699 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="34051707b510eda7d430cf5a76cac3ee7de9522d44aaaf3b2ed565e3139df27a" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.423108 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.547066 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj"] Nov 25 21:58:44 crc kubenswrapper[4910]: E1125 21:58:44.548040 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc112a56-de9c-47b3-8ca9-c0225469f85c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.548141 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc112a56-de9c-47b3-8ca9-c0225469f85c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.548530 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc112a56-de9c-47b3-8ca9-c0225469f85c" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.549556 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.553169 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.553581 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.554075 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.554915 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.560612 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj"] Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.649593 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.649719 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.650001 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w9nhd\" (UniqueName: \"kubernetes.io/projected/d8581103-5144-4384-8d68-9160c64f6233-kube-api-access-w9nhd\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.752279 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w9nhd\" (UniqueName: \"kubernetes.io/projected/d8581103-5144-4384-8d68-9160c64f6233-kube-api-access-w9nhd\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.752990 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.753202 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-ssh-key\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.758193 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.759113 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.769539 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w9nhd\" (UniqueName: \"kubernetes.io/projected/d8581103-5144-4384-8d68-9160c64f6233-kube-api-access-w9nhd\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:44 crc kubenswrapper[4910]: I1125 21:58:44.877116 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:45 crc kubenswrapper[4910]: I1125 21:58:45.307189 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj"] Nov 25 21:58:45 crc kubenswrapper[4910]: I1125 21:58:45.436843 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" event={"ID":"d8581103-5144-4384-8d68-9160c64f6233","Type":"ContainerStarted","Data":"b580eb7eb4433f61d3d214be094abc20a75bbefe6da882693980935145955ac9"} Nov 25 21:58:45 crc kubenswrapper[4910]: I1125 21:58:45.825401 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 21:58:46 crc kubenswrapper[4910]: I1125 21:58:46.464418 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" event={"ID":"d8581103-5144-4384-8d68-9160c64f6233","Type":"ContainerStarted","Data":"808c11e4463886016aa1d179d95a77d85a9c269759ae897710b8c53a859e1816"} Nov 25 21:58:46 crc kubenswrapper[4910]: I1125 21:58:46.493228 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" podStartSLOduration=1.992286177 podStartE2EDuration="2.493199455s" podCreationTimestamp="2025-11-25 21:58:44 +0000 UTC" firstStartedPulling="2025-11-25 21:58:45.320084205 +0000 UTC m=+1680.782560557" lastFinishedPulling="2025-11-25 21:58:45.820997513 +0000 UTC m=+1681.283473835" observedRunningTime="2025-11-25 21:58:46.485208882 +0000 UTC m=+1681.947685204" watchObservedRunningTime="2025-11-25 21:58:46.493199455 +0000 UTC m=+1681.955675777" Nov 25 21:58:51 crc kubenswrapper[4910]: I1125 21:58:51.061143 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/nova-cell0-db-create-sr4sn"] Nov 25 21:58:51 crc kubenswrapper[4910]: I1125 21:58:51.081144 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-fns6z"] Nov 25 21:58:51 crc kubenswrapper[4910]: I1125 21:58:51.092046 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-fns6z"] Nov 25 21:58:51 crc kubenswrapper[4910]: I1125 21:58:51.101809 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-sr4sn"] Nov 25 21:58:51 crc kubenswrapper[4910]: I1125 21:58:51.219838 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d012ed8c-8195-4ea9-b3e3-4a3e750e8d70" path="/var/lib/kubelet/pods/d012ed8c-8195-4ea9-b3e3-4a3e750e8d70/volumes" Nov 25 21:58:51 crc kubenswrapper[4910]: I1125 21:58:51.221852 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80" path="/var/lib/kubelet/pods/e1cdceef-fc51-466e-aaf8-6c0b3b2bcf80/volumes" Nov 25 21:58:51 crc kubenswrapper[4910]: I1125 21:58:51.546182 4910 generic.go:334] "Generic (PLEG): container finished" podID="d8581103-5144-4384-8d68-9160c64f6233" containerID="808c11e4463886016aa1d179d95a77d85a9c269759ae897710b8c53a859e1816" exitCode=0 Nov 25 21:58:51 crc kubenswrapper[4910]: I1125 21:58:51.546363 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" event={"ID":"d8581103-5144-4384-8d68-9160c64f6233","Type":"ContainerDied","Data":"808c11e4463886016aa1d179d95a77d85a9c269759ae897710b8c53a859e1816"} Nov 25 21:58:52 crc kubenswrapper[4910]: I1125 21:58:52.051127 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-78559"] Nov 25 21:58:52 crc kubenswrapper[4910]: I1125 21:58:52.066816 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-d4c6-account-create-update-4bczb"] Nov 25 21:58:52 crc kubenswrapper[4910]: I1125 21:58:52.077816 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-4dff-account-create-update-b7gr5"] Nov 25 21:58:52 crc kubenswrapper[4910]: I1125 21:58:52.089957 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-78559"] Nov 25 21:58:52 crc kubenswrapper[4910]: I1125 21:58:52.099284 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-d4c6-account-create-update-4bczb"] Nov 25 21:58:52 crc kubenswrapper[4910]: I1125 21:58:52.105890 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-4dff-account-create-update-b7gr5"] Nov 25 21:58:52 crc kubenswrapper[4910]: I1125 21:58:52.112385 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-3555-account-create-update-4wx9j"] Nov 25 21:58:52 crc kubenswrapper[4910]: I1125 21:58:52.119664 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-3555-account-create-update-4wx9j"] Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.112563 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.156027 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-ssh-key\") pod \"d8581103-5144-4384-8d68-9160c64f6233\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.156143 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-inventory\") pod \"d8581103-5144-4384-8d68-9160c64f6233\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.156224 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9nhd\" (UniqueName: \"kubernetes.io/projected/d8581103-5144-4384-8d68-9160c64f6233-kube-api-access-w9nhd\") pod \"d8581103-5144-4384-8d68-9160c64f6233\" (UID: \"d8581103-5144-4384-8d68-9160c64f6233\") " Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.165934 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8581103-5144-4384-8d68-9160c64f6233-kube-api-access-w9nhd" (OuterVolumeSpecName: "kube-api-access-w9nhd") pod "d8581103-5144-4384-8d68-9160c64f6233" (UID: "d8581103-5144-4384-8d68-9160c64f6233"). InnerVolumeSpecName "kube-api-access-w9nhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.190974 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "d8581103-5144-4384-8d68-9160c64f6233" (UID: "d8581103-5144-4384-8d68-9160c64f6233"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.192614 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-inventory" (OuterVolumeSpecName: "inventory") pod "d8581103-5144-4384-8d68-9160c64f6233" (UID: "d8581103-5144-4384-8d68-9160c64f6233"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.203938 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:58:53 crc kubenswrapper[4910]: E1125 21:58:53.204450 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.217928 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b417522-64b1-43ad-84e9-19795c605ebf" path="/var/lib/kubelet/pods/3b417522-64b1-43ad-84e9-19795c605ebf/volumes" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.218482 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41176817-8b80-4a07-832e-3957be57cf82" path="/var/lib/kubelet/pods/41176817-8b80-4a07-832e-3957be57cf82/volumes" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.219035 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8187b53-8402-4ae3-b580-5afa43f29e9f" path="/var/lib/kubelet/pods/d8187b53-8402-4ae3-b580-5afa43f29e9f/volumes" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.219711 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb48d3cb-07fa-4be7-bbb2-8af493a83edf" path="/var/lib/kubelet/pods/fb48d3cb-07fa-4be7-bbb2-8af493a83edf/volumes" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.258378 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9nhd\" (UniqueName: \"kubernetes.io/projected/d8581103-5144-4384-8d68-9160c64f6233-kube-api-access-w9nhd\") on node \"crc\" DevicePath \"\"" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.258424 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.258436 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/d8581103-5144-4384-8d68-9160c64f6233-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.586718 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" event={"ID":"d8581103-5144-4384-8d68-9160c64f6233","Type":"ContainerDied","Data":"b580eb7eb4433f61d3d214be094abc20a75bbefe6da882693980935145955ac9"} Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.586924 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b580eb7eb4433f61d3d214be094abc20a75bbefe6da882693980935145955ac9" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.587220 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.705224 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4"] Nov 25 21:58:53 crc kubenswrapper[4910]: E1125 21:58:53.705899 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8581103-5144-4384-8d68-9160c64f6233" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.705925 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8581103-5144-4384-8d68-9160c64f6233" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.706184 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8581103-5144-4384-8d68-9160c64f6233" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.707163 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.709818 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.709951 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.710200 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.710454 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.737281 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4"] Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.778799 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sp4g4\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.778920 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm74m\" (UniqueName: \"kubernetes.io/projected/941bb6aa-1438-4ed4-8ed3-3e834a784a79-kube-api-access-wm74m\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sp4g4\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.779154 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sp4g4\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.881557 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm74m\" (UniqueName: \"kubernetes.io/projected/941bb6aa-1438-4ed4-8ed3-3e834a784a79-kube-api-access-wm74m\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sp4g4\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.881873 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sp4g4\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.881958 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sp4g4\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.889751 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sp4g4\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.891205 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sp4g4\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:58:53 crc kubenswrapper[4910]: I1125 21:58:53.911600 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm74m\" (UniqueName: \"kubernetes.io/projected/941bb6aa-1438-4ed4-8ed3-3e834a784a79-kube-api-access-wm74m\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sp4g4\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.040362 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.327728 4910 scope.go:117] "RemoveContainer" containerID="98b85101cf133b251b3c4ea10424c224c5318fca5d278e330bcaa598ce042f82" Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.394912 4910 scope.go:117] "RemoveContainer" containerID="328d173f0b4414fb83dcd47c83b8e7cc6508063c640779c31ee1e7d7c79e664a" Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.437808 4910 scope.go:117] "RemoveContainer" containerID="d18973c06dc26f92b8f4426573f9376ee1c34a857b14ef2f24c70116170a4388" Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.499438 4910 scope.go:117] "RemoveContainer" containerID="5b6355ce859db2972832c6cf53b1bd1aad9f41eec778230b108da7ee32408d4b" Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.554787 4910 scope.go:117] "RemoveContainer" containerID="427cb77a54c6e69a974e8845021f58e8fe311a2d9eb4e2236253dbb18173858f" Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.586030 4910 scope.go:117] "RemoveContainer" containerID="34cae767d7c53b22041356fd6c63ea50c31a7833801a5d2a639ab77d021aba31" Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.634604 4910 scope.go:117] "RemoveContainer" containerID="e72efc6d386a0a844a6066db17474b506b571b83a588d2479b71f4b63625832d" Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.668600 4910 scope.go:117] "RemoveContainer" containerID="9b54dbe72aa3d0767700a882d35711169fd8d9ac869c6d870f7b28b2cd4ea119" Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.676457 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4"] Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.711208 4910 scope.go:117] "RemoveContainer" containerID="56ce33239df4389259c16204e59d07a81c460822f7d19ea02b3a267d08780f3a" Nov 25 21:58:54 crc kubenswrapper[4910]: I1125 21:58:54.741578 4910 scope.go:117] "RemoveContainer" containerID="ac6c521f0abba3b2bd9e0c3410b7140b6c2fa69e307ad0aac6495f000a16ae5f" Nov 25 21:58:55 crc kubenswrapper[4910]: I1125 21:58:55.638897 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" event={"ID":"941bb6aa-1438-4ed4-8ed3-3e834a784a79","Type":"ContainerStarted","Data":"0a6e0be742fa19d741467a576141df737412b2079f0d4d72f35f1b388a23ea6b"} Nov 25 21:58:55 crc kubenswrapper[4910]: I1125 21:58:55.639351 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" event={"ID":"941bb6aa-1438-4ed4-8ed3-3e834a784a79","Type":"ContainerStarted","Data":"0452d93f7a03057d8745343a712826ecc7536af27c6e0c7e9112018597d91ed4"} Nov 25 21:58:55 crc kubenswrapper[4910]: I1125 21:58:55.668946 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" podStartSLOduration=2.233415856 podStartE2EDuration="2.66891588s" podCreationTimestamp="2025-11-25 21:58:53 +0000 UTC" firstStartedPulling="2025-11-25 21:58:54.689535353 +0000 UTC m=+1690.152011695" lastFinishedPulling="2025-11-25 21:58:55.125035377 +0000 UTC m=+1690.587511719" observedRunningTime="2025-11-25 21:58:55.654751433 +0000 UTC m=+1691.117227795" watchObservedRunningTime="2025-11-25 21:58:55.66891588 +0000 UTC m=+1691.131392202" Nov 25 21:59:06 crc kubenswrapper[4910]: I1125 21:59:06.204970 4910 scope.go:117] "RemoveContainer" 
containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:59:06 crc kubenswrapper[4910]: E1125 21:59:06.206733 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:59:19 crc kubenswrapper[4910]: I1125 21:59:19.205334 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:59:19 crc kubenswrapper[4910]: E1125 21:59:19.206369 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:59:25 crc kubenswrapper[4910]: I1125 21:59:25.076355 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-gswkn"] Nov 25 21:59:25 crc kubenswrapper[4910]: I1125 21:59:25.096694 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-gswkn"] Nov 25 21:59:25 crc kubenswrapper[4910]: I1125 21:59:25.247145 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="499e86de-9121-4084-b380-6bf87d8f4881" path="/var/lib/kubelet/pods/499e86de-9121-4084-b380-6bf87d8f4881/volumes" Nov 25 21:59:33 crc kubenswrapper[4910]: I1125 21:59:33.207909 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:59:33 crc kubenswrapper[4910]: E1125 21:59:33.209609 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:59:37 crc kubenswrapper[4910]: I1125 21:59:37.223722 4910 generic.go:334] "Generic (PLEG): container finished" podID="941bb6aa-1438-4ed4-8ed3-3e834a784a79" containerID="0a6e0be742fa19d741467a576141df737412b2079f0d4d72f35f1b388a23ea6b" exitCode=0 Nov 25 21:59:37 crc kubenswrapper[4910]: I1125 21:59:37.230953 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" event={"ID":"941bb6aa-1438-4ed4-8ed3-3e834a784a79","Type":"ContainerDied","Data":"0a6e0be742fa19d741467a576141df737412b2079f0d4d72f35f1b388a23ea6b"} Nov 25 21:59:38 crc kubenswrapper[4910]: I1125 21:59:38.696799 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:59:38 crc kubenswrapper[4910]: I1125 21:59:38.787618 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-inventory\") pod \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " Nov 25 21:59:38 crc kubenswrapper[4910]: I1125 21:59:38.787816 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wm74m\" (UniqueName: \"kubernetes.io/projected/941bb6aa-1438-4ed4-8ed3-3e834a784a79-kube-api-access-wm74m\") pod \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " Nov 25 21:59:38 crc kubenswrapper[4910]: I1125 21:59:38.787887 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-ssh-key\") pod \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\" (UID: \"941bb6aa-1438-4ed4-8ed3-3e834a784a79\") " Nov 25 21:59:38 crc kubenswrapper[4910]: I1125 21:59:38.795908 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/941bb6aa-1438-4ed4-8ed3-3e834a784a79-kube-api-access-wm74m" (OuterVolumeSpecName: "kube-api-access-wm74m") pod "941bb6aa-1438-4ed4-8ed3-3e834a784a79" (UID: "941bb6aa-1438-4ed4-8ed3-3e834a784a79"). InnerVolumeSpecName "kube-api-access-wm74m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 21:59:38 crc kubenswrapper[4910]: I1125 21:59:38.819102 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-inventory" (OuterVolumeSpecName: "inventory") pod "941bb6aa-1438-4ed4-8ed3-3e834a784a79" (UID: "941bb6aa-1438-4ed4-8ed3-3e834a784a79"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:59:38 crc kubenswrapper[4910]: I1125 21:59:38.819194 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "941bb6aa-1438-4ed4-8ed3-3e834a784a79" (UID: "941bb6aa-1438-4ed4-8ed3-3e834a784a79"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 21:59:38 crc kubenswrapper[4910]: I1125 21:59:38.890194 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 21:59:38 crc kubenswrapper[4910]: I1125 21:59:38.890235 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wm74m\" (UniqueName: \"kubernetes.io/projected/941bb6aa-1438-4ed4-8ed3-3e834a784a79-kube-api-access-wm74m\") on node \"crc\" DevicePath \"\"" Nov 25 21:59:38 crc kubenswrapper[4910]: I1125 21:59:38.890251 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/941bb6aa-1438-4ed4-8ed3-3e834a784a79-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.268367 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" event={"ID":"941bb6aa-1438-4ed4-8ed3-3e834a784a79","Type":"ContainerDied","Data":"0452d93f7a03057d8745343a712826ecc7536af27c6e0c7e9112018597d91ed4"} Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.268461 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0452d93f7a03057d8745343a712826ecc7536af27c6e0c7e9112018597d91ed4" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.268477 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sp4g4" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.402655 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j"] Nov 25 21:59:39 crc kubenswrapper[4910]: E1125 21:59:39.403197 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="941bb6aa-1438-4ed4-8ed3-3e834a784a79" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.403222 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="941bb6aa-1438-4ed4-8ed3-3e834a784a79" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.403525 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="941bb6aa-1438-4ed4-8ed3-3e834a784a79" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.404400 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.408702 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.408827 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.412887 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.412938 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.417779 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j"] Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.510985 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j\" (UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.511642 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlkdj\" (UniqueName: \"kubernetes.io/projected/6552e880-8d31-43fc-9fee-d2e33c2ca987-kube-api-access-vlkdj\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j\" (UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.511739 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j\" (UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.615533 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j\" (UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.615658 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlkdj\" (UniqueName: \"kubernetes.io/projected/6552e880-8d31-43fc-9fee-d2e33c2ca987-kube-api-access-vlkdj\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j\" (UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.615752 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j\" 
(UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.622401 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j\" (UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.623120 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j\" (UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.644814 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlkdj\" (UniqueName: \"kubernetes.io/projected/6552e880-8d31-43fc-9fee-d2e33c2ca987-kube-api-access-vlkdj\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j\" (UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 21:59:39 crc kubenswrapper[4910]: I1125 21:59:39.768377 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 21:59:40 crc kubenswrapper[4910]: I1125 21:59:40.412383 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j"] Nov 25 21:59:41 crc kubenswrapper[4910]: I1125 21:59:41.301106 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" event={"ID":"6552e880-8d31-43fc-9fee-d2e33c2ca987","Type":"ContainerStarted","Data":"23f2750604ab3a0e4d7d5f2b7097e920929e234df80ad6db952c66dade15c4d5"} Nov 25 21:59:42 crc kubenswrapper[4910]: I1125 21:59:42.320386 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" event={"ID":"6552e880-8d31-43fc-9fee-d2e33c2ca987","Type":"ContainerStarted","Data":"d0f0ec780e3b6544607692baf1d111dfaacd6ef414cb4ae3df6d5245ef34653b"} Nov 25 21:59:42 crc kubenswrapper[4910]: I1125 21:59:42.352235 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" podStartSLOduration=2.643973947 podStartE2EDuration="3.35220349s" podCreationTimestamp="2025-11-25 21:59:39 +0000 UTC" firstStartedPulling="2025-11-25 21:59:40.425228232 +0000 UTC m=+1735.887704554" lastFinishedPulling="2025-11-25 21:59:41.133457735 +0000 UTC m=+1736.595934097" observedRunningTime="2025-11-25 21:59:42.345587644 +0000 UTC m=+1737.808063986" watchObservedRunningTime="2025-11-25 21:59:42.35220349 +0000 UTC m=+1737.814679822" Nov 25 21:59:46 crc kubenswrapper[4910]: I1125 21:59:46.204778 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:59:46 crc kubenswrapper[4910]: E1125 21:59:46.205541 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 21:59:50 crc kubenswrapper[4910]: I1125 21:59:50.678972 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-9qz78"] Nov 25 21:59:50 crc kubenswrapper[4910]: I1125 21:59:50.699834 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-9qz78"] Nov 25 21:59:51 crc kubenswrapper[4910]: I1125 21:59:51.046123 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-sng8f"] Nov 25 21:59:51 crc kubenswrapper[4910]: I1125 21:59:51.067101 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-sng8f"] Nov 25 21:59:51 crc kubenswrapper[4910]: I1125 21:59:51.222003 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="74e4f4a8-4449-4432-bf1a-b82789fe0d3d" path="/var/lib/kubelet/pods/74e4f4a8-4449-4432-bf1a-b82789fe0d3d/volumes" Nov 25 21:59:51 crc kubenswrapper[4910]: I1125 21:59:51.223456 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a821ca31-47ec-41c0-97c8-3d254b4f412a" path="/var/lib/kubelet/pods/a821ca31-47ec-41c0-97c8-3d254b4f412a/volumes" Nov 25 21:59:54 crc kubenswrapper[4910]: I1125 21:59:54.995789 4910 scope.go:117] "RemoveContainer" containerID="a83dd0fb339072beb5e0c6a124aaa77d14b7695af25a9a8848cdf6ab2a7846ba" Nov 25 21:59:55 crc kubenswrapper[4910]: I1125 21:59:55.043894 4910 scope.go:117] "RemoveContainer" containerID="ac5fe85faf6d03a30df279c68be256f02f6dc342b7d5c492aa000ab21c321844" Nov 25 21:59:55 crc kubenswrapper[4910]: I1125 21:59:55.136961 4910 scope.go:117] "RemoveContainer" containerID="7de067dbaf2d29d420601c9d7b4a54117b9eac09d18bf24eb53d9481ff83dec6" Nov 25 21:59:59 crc kubenswrapper[4910]: I1125 21:59:59.204517 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 21:59:59 crc kubenswrapper[4910]: E1125 21:59:59.205499 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.154922 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j"] Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.156603 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.160799 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.160799 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.182193 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j"] Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.327076 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2a27d007-69bd-497f-acf6-c66e0d55ef38-config-volume\") pod \"collect-profiles-29401800-2765j\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.327149 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2a27d007-69bd-497f-acf6-c66e0d55ef38-secret-volume\") pod \"collect-profiles-29401800-2765j\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.327445 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6qf5\" (UniqueName: \"kubernetes.io/projected/2a27d007-69bd-497f-acf6-c66e0d55ef38-kube-api-access-r6qf5\") pod \"collect-profiles-29401800-2765j\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.428940 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r6qf5\" (UniqueName: \"kubernetes.io/projected/2a27d007-69bd-497f-acf6-c66e0d55ef38-kube-api-access-r6qf5\") pod \"collect-profiles-29401800-2765j\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.429632 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2a27d007-69bd-497f-acf6-c66e0d55ef38-config-volume\") pod \"collect-profiles-29401800-2765j\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.429710 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2a27d007-69bd-497f-acf6-c66e0d55ef38-secret-volume\") pod \"collect-profiles-29401800-2765j\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.430527 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2a27d007-69bd-497f-acf6-c66e0d55ef38-config-volume\") pod 
\"collect-profiles-29401800-2765j\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.445887 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2a27d007-69bd-497f-acf6-c66e0d55ef38-secret-volume\") pod \"collect-profiles-29401800-2765j\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.449274 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6qf5\" (UniqueName: \"kubernetes.io/projected/2a27d007-69bd-497f-acf6-c66e0d55ef38-kube-api-access-r6qf5\") pod \"collect-profiles-29401800-2765j\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:00 crc kubenswrapper[4910]: I1125 22:00:00.487811 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:01 crc kubenswrapper[4910]: I1125 22:00:01.085290 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j"] Nov 25 22:00:01 crc kubenswrapper[4910]: I1125 22:00:01.770107 4910 generic.go:334] "Generic (PLEG): container finished" podID="2a27d007-69bd-497f-acf6-c66e0d55ef38" containerID="2943ac850f8c2f8d612f8fdc5aa9c2e67838b22d94491265a63323472afef99e" exitCode=0 Nov 25 22:00:01 crc kubenswrapper[4910]: I1125 22:00:01.770214 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" event={"ID":"2a27d007-69bd-497f-acf6-c66e0d55ef38","Type":"ContainerDied","Data":"2943ac850f8c2f8d612f8fdc5aa9c2e67838b22d94491265a63323472afef99e"} Nov 25 22:00:01 crc kubenswrapper[4910]: I1125 22:00:01.771186 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" event={"ID":"2a27d007-69bd-497f-acf6-c66e0d55ef38","Type":"ContainerStarted","Data":"3f81c12e42a3a86ea5928bdf82c66e26755d3bfa22cc6830822b349175bd194c"} Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.171627 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.315882 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2a27d007-69bd-497f-acf6-c66e0d55ef38-config-volume\") pod \"2a27d007-69bd-497f-acf6-c66e0d55ef38\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.317016 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a27d007-69bd-497f-acf6-c66e0d55ef38-config-volume" (OuterVolumeSpecName: "config-volume") pod "2a27d007-69bd-497f-acf6-c66e0d55ef38" (UID: "2a27d007-69bd-497f-acf6-c66e0d55ef38"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.317200 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r6qf5\" (UniqueName: \"kubernetes.io/projected/2a27d007-69bd-497f-acf6-c66e0d55ef38-kube-api-access-r6qf5\") pod \"2a27d007-69bd-497f-acf6-c66e0d55ef38\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.318214 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2a27d007-69bd-497f-acf6-c66e0d55ef38-secret-volume\") pod \"2a27d007-69bd-497f-acf6-c66e0d55ef38\" (UID: \"2a27d007-69bd-497f-acf6-c66e0d55ef38\") " Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.320125 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2a27d007-69bd-497f-acf6-c66e0d55ef38-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.325578 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a27d007-69bd-497f-acf6-c66e0d55ef38-kube-api-access-r6qf5" (OuterVolumeSpecName: "kube-api-access-r6qf5") pod "2a27d007-69bd-497f-acf6-c66e0d55ef38" (UID: "2a27d007-69bd-497f-acf6-c66e0d55ef38"). InnerVolumeSpecName "kube-api-access-r6qf5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.331167 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a27d007-69bd-497f-acf6-c66e0d55ef38-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2a27d007-69bd-497f-acf6-c66e0d55ef38" (UID: "2a27d007-69bd-497f-acf6-c66e0d55ef38"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.422172 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r6qf5\" (UniqueName: \"kubernetes.io/projected/2a27d007-69bd-497f-acf6-c66e0d55ef38-kube-api-access-r6qf5\") on node \"crc\" DevicePath \"\"" Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.422269 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2a27d007-69bd-497f-acf6-c66e0d55ef38-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.788783 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" event={"ID":"2a27d007-69bd-497f-acf6-c66e0d55ef38","Type":"ContainerDied","Data":"3f81c12e42a3a86ea5928bdf82c66e26755d3bfa22cc6830822b349175bd194c"} Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.788824 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j" Nov 25 22:00:03 crc kubenswrapper[4910]: I1125 22:00:03.788846 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3f81c12e42a3a86ea5928bdf82c66e26755d3bfa22cc6830822b349175bd194c" Nov 25 22:00:13 crc kubenswrapper[4910]: I1125 22:00:13.207859 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 22:00:13 crc kubenswrapper[4910]: E1125 22:00:13.209195 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:00:25 crc kubenswrapper[4910]: I1125 22:00:25.218503 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 22:00:26 crc kubenswrapper[4910]: I1125 22:00:26.129013 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"61d77b28ec593a2c9c5098bb7f50771fc12cbdca15009996847ff51c55bf8549"} Nov 25 22:00:33 crc kubenswrapper[4910]: I1125 22:00:33.066822 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-tm28z"] Nov 25 22:00:33 crc kubenswrapper[4910]: I1125 22:00:33.081828 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-tm28z"] Nov 25 22:00:33 crc kubenswrapper[4910]: I1125 22:00:33.234498 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dd9248e6-abed-48da-948e-3cf59171c0e7" path="/var/lib/kubelet/pods/dd9248e6-abed-48da-948e-3cf59171c0e7/volumes" Nov 25 22:00:42 crc kubenswrapper[4910]: I1125 22:00:42.300549 4910 generic.go:334] "Generic (PLEG): container finished" podID="6552e880-8d31-43fc-9fee-d2e33c2ca987" containerID="d0f0ec780e3b6544607692baf1d111dfaacd6ef414cb4ae3df6d5245ef34653b" exitCode=0 Nov 25 22:00:42 crc kubenswrapper[4910]: I1125 22:00:42.300714 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" event={"ID":"6552e880-8d31-43fc-9fee-d2e33c2ca987","Type":"ContainerDied","Data":"d0f0ec780e3b6544607692baf1d111dfaacd6ef414cb4ae3df6d5245ef34653b"} Nov 25 22:00:43 crc kubenswrapper[4910]: I1125 22:00:43.797455 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 22:00:43 crc kubenswrapper[4910]: I1125 22:00:43.965254 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-inventory\") pod \"6552e880-8d31-43fc-9fee-d2e33c2ca987\" (UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " Nov 25 22:00:43 crc kubenswrapper[4910]: I1125 22:00:43.965520 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-ssh-key\") pod \"6552e880-8d31-43fc-9fee-d2e33c2ca987\" (UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " Nov 25 22:00:43 crc kubenswrapper[4910]: I1125 22:00:43.965587 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlkdj\" (UniqueName: \"kubernetes.io/projected/6552e880-8d31-43fc-9fee-d2e33c2ca987-kube-api-access-vlkdj\") pod \"6552e880-8d31-43fc-9fee-d2e33c2ca987\" (UID: \"6552e880-8d31-43fc-9fee-d2e33c2ca987\") " Nov 25 22:00:43 crc kubenswrapper[4910]: I1125 22:00:43.990780 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6552e880-8d31-43fc-9fee-d2e33c2ca987-kube-api-access-vlkdj" (OuterVolumeSpecName: "kube-api-access-vlkdj") pod "6552e880-8d31-43fc-9fee-d2e33c2ca987" (UID: "6552e880-8d31-43fc-9fee-d2e33c2ca987"). InnerVolumeSpecName "kube-api-access-vlkdj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.041559 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "6552e880-8d31-43fc-9fee-d2e33c2ca987" (UID: "6552e880-8d31-43fc-9fee-d2e33c2ca987"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.064157 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-inventory" (OuterVolumeSpecName: "inventory") pod "6552e880-8d31-43fc-9fee-d2e33c2ca987" (UID: "6552e880-8d31-43fc-9fee-d2e33c2ca987"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.067900 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.067931 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlkdj\" (UniqueName: \"kubernetes.io/projected/6552e880-8d31-43fc-9fee-d2e33c2ca987-kube-api-access-vlkdj\") on node \"crc\" DevicePath \"\"" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.067944 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/6552e880-8d31-43fc-9fee-d2e33c2ca987-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.324592 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" event={"ID":"6552e880-8d31-43fc-9fee-d2e33c2ca987","Type":"ContainerDied","Data":"23f2750604ab3a0e4d7d5f2b7097e920929e234df80ad6db952c66dade15c4d5"} Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.325081 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23f2750604ab3a0e4d7d5f2b7097e920929e234df80ad6db952c66dade15c4d5" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.324659 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.426938 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-r9mbm"] Nov 25 22:00:44 crc kubenswrapper[4910]: E1125 22:00:44.427510 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a27d007-69bd-497f-acf6-c66e0d55ef38" containerName="collect-profiles" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.427534 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a27d007-69bd-497f-acf6-c66e0d55ef38" containerName="collect-profiles" Nov 25 22:00:44 crc kubenswrapper[4910]: E1125 22:00:44.427548 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6552e880-8d31-43fc-9fee-d2e33c2ca987" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.427557 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6552e880-8d31-43fc-9fee-d2e33c2ca987" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.427729 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a27d007-69bd-497f-acf6-c66e0d55ef38" containerName="collect-profiles" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.427765 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6552e880-8d31-43fc-9fee-d2e33c2ca987" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.428759 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.432136 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.432422 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.432595 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.432772 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.446861 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-r9mbm"] Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.580303 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-r9mbm\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.580523 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xj4mb\" (UniqueName: \"kubernetes.io/projected/475e4e94-21eb-40fb-8d3d-b5359cc77a88-kube-api-access-xj4mb\") pod \"ssh-known-hosts-edpm-deployment-r9mbm\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.580642 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-r9mbm\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.683891 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-r9mbm\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.684027 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xj4mb\" (UniqueName: \"kubernetes.io/projected/475e4e94-21eb-40fb-8d3d-b5359cc77a88-kube-api-access-xj4mb\") pod \"ssh-known-hosts-edpm-deployment-r9mbm\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.684121 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-r9mbm\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:44 crc 
kubenswrapper[4910]: I1125 22:00:44.690392 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-r9mbm\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.692383 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-r9mbm\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.717026 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xj4mb\" (UniqueName: \"kubernetes.io/projected/475e4e94-21eb-40fb-8d3d-b5359cc77a88-kube-api-access-xj4mb\") pod \"ssh-known-hosts-edpm-deployment-r9mbm\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:44 crc kubenswrapper[4910]: I1125 22:00:44.774546 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:45 crc kubenswrapper[4910]: I1125 22:00:45.405685 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-r9mbm"] Nov 25 22:00:45 crc kubenswrapper[4910]: I1125 22:00:45.856691 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 22:00:46 crc kubenswrapper[4910]: I1125 22:00:46.349282 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" event={"ID":"475e4e94-21eb-40fb-8d3d-b5359cc77a88","Type":"ContainerStarted","Data":"c759b737282f5df00dd4ffcc8360594cea701323d8c7f814a92ab19d4e108be9"} Nov 25 22:00:46 crc kubenswrapper[4910]: I1125 22:00:46.349342 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" event={"ID":"475e4e94-21eb-40fb-8d3d-b5359cc77a88","Type":"ContainerStarted","Data":"7b36cec851335366484693f1fa7d8a83520d29267372768ee546db4830d2a3f3"} Nov 25 22:00:46 crc kubenswrapper[4910]: I1125 22:00:46.385277 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" podStartSLOduration=1.9512152249999999 podStartE2EDuration="2.385206849s" podCreationTimestamp="2025-11-25 22:00:44 +0000 UTC" firstStartedPulling="2025-11-25 22:00:45.419581939 +0000 UTC m=+1800.882058311" lastFinishedPulling="2025-11-25 22:00:45.853573603 +0000 UTC m=+1801.316049935" observedRunningTime="2025-11-25 22:00:46.379529758 +0000 UTC m=+1801.842006080" watchObservedRunningTime="2025-11-25 22:00:46.385206849 +0000 UTC m=+1801.847683211" Nov 25 22:00:55 crc kubenswrapper[4910]: I1125 22:00:55.278404 4910 scope.go:117] "RemoveContainer" containerID="3dad5fd9f70dee45b08e55b77d3c1230c80814ceb669e62513c208e736150bc9" Nov 25 22:00:55 crc kubenswrapper[4910]: I1125 22:00:55.492720 4910 generic.go:334] "Generic (PLEG): container finished" podID="475e4e94-21eb-40fb-8d3d-b5359cc77a88" containerID="c759b737282f5df00dd4ffcc8360594cea701323d8c7f814a92ab19d4e108be9" exitCode=0 Nov 25 22:00:55 crc kubenswrapper[4910]: I1125 22:00:55.492785 4910 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" event={"ID":"475e4e94-21eb-40fb-8d3d-b5359cc77a88","Type":"ContainerDied","Data":"c759b737282f5df00dd4ffcc8360594cea701323d8c7f814a92ab19d4e108be9"} Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.016662 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.156778 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xj4mb\" (UniqueName: \"kubernetes.io/projected/475e4e94-21eb-40fb-8d3d-b5359cc77a88-kube-api-access-xj4mb\") pod \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.156823 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-ssh-key-openstack-edpm-ipam\") pod \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.156849 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-inventory-0\") pod \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\" (UID: \"475e4e94-21eb-40fb-8d3d-b5359cc77a88\") " Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.165380 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/475e4e94-21eb-40fb-8d3d-b5359cc77a88-kube-api-access-xj4mb" (OuterVolumeSpecName: "kube-api-access-xj4mb") pod "475e4e94-21eb-40fb-8d3d-b5359cc77a88" (UID: "475e4e94-21eb-40fb-8d3d-b5359cc77a88"). InnerVolumeSpecName "kube-api-access-xj4mb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.204383 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "475e4e94-21eb-40fb-8d3d-b5359cc77a88" (UID: "475e4e94-21eb-40fb-8d3d-b5359cc77a88"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.208564 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "475e4e94-21eb-40fb-8d3d-b5359cc77a88" (UID: "475e4e94-21eb-40fb-8d3d-b5359cc77a88"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.259375 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xj4mb\" (UniqueName: \"kubernetes.io/projected/475e4e94-21eb-40fb-8d3d-b5359cc77a88-kube-api-access-xj4mb\") on node \"crc\" DevicePath \"\"" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.259422 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.259439 4910 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/475e4e94-21eb-40fb-8d3d-b5359cc77a88-inventory-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.516103 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" event={"ID":"475e4e94-21eb-40fb-8d3d-b5359cc77a88","Type":"ContainerDied","Data":"7b36cec851335366484693f1fa7d8a83520d29267372768ee546db4830d2a3f3"} Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.516176 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b36cec851335366484693f1fa7d8a83520d29267372768ee546db4830d2a3f3" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.516236 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-r9mbm" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.631216 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg"] Nov 25 22:00:57 crc kubenswrapper[4910]: E1125 22:00:57.632285 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="475e4e94-21eb-40fb-8d3d-b5359cc77a88" containerName="ssh-known-hosts-edpm-deployment" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.632309 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="475e4e94-21eb-40fb-8d3d-b5359cc77a88" containerName="ssh-known-hosts-edpm-deployment" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.632608 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="475e4e94-21eb-40fb-8d3d-b5359cc77a88" containerName="ssh-known-hosts-edpm-deployment" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.633377 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.637695 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.637752 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.637896 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.638051 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.646463 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg"] Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.774670 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nsgb\" (UniqueName: \"kubernetes.io/projected/77fc796b-aaee-4dad-a82d-464aaf60ab47-kube-api-access-8nsgb\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-t7clg\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.774988 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-t7clg\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.775679 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-t7clg\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.877627 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-t7clg\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.877954 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nsgb\" (UniqueName: \"kubernetes.io/projected/77fc796b-aaee-4dad-a82d-464aaf60ab47-kube-api-access-8nsgb\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-t7clg\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.878110 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-t7clg\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " 
pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.884091 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-t7clg\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.885751 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-t7clg\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.912239 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nsgb\" (UniqueName: \"kubernetes.io/projected/77fc796b-aaee-4dad-a82d-464aaf60ab47-kube-api-access-8nsgb\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-t7clg\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:00:57 crc kubenswrapper[4910]: I1125 22:00:57.963480 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:00:58 crc kubenswrapper[4910]: I1125 22:00:58.401698 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg"] Nov 25 22:00:58 crc kubenswrapper[4910]: I1125 22:00:58.416829 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 22:00:58 crc kubenswrapper[4910]: I1125 22:00:58.528151 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" event={"ID":"77fc796b-aaee-4dad-a82d-464aaf60ab47","Type":"ContainerStarted","Data":"52a02908a3c964fce256caee25f8f71a76fe5bab1f330e4fc58bc0d761c42467"} Nov 25 22:00:59 crc kubenswrapper[4910]: I1125 22:00:59.542697 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" event={"ID":"77fc796b-aaee-4dad-a82d-464aaf60ab47","Type":"ContainerStarted","Data":"7cf3e6c46b0a42e8c46178ae05a43d0f6ac9ae571f94e81dd2c8af3d966557f0"} Nov 25 22:00:59 crc kubenswrapper[4910]: I1125 22:00:59.570766 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" podStartSLOduration=2.11270527 podStartE2EDuration="2.570744645s" podCreationTimestamp="2025-11-25 22:00:57 +0000 UTC" firstStartedPulling="2025-11-25 22:00:58.416611571 +0000 UTC m=+1813.879087893" lastFinishedPulling="2025-11-25 22:00:58.874650946 +0000 UTC m=+1814.337127268" observedRunningTime="2025-11-25 22:00:59.562404203 +0000 UTC m=+1815.024880515" watchObservedRunningTime="2025-11-25 22:00:59.570744645 +0000 UTC m=+1815.033220967" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.153294 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29401801-dfmbk"] Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.156756 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.174940 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401801-dfmbk"] Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.339376 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jvkx\" (UniqueName: \"kubernetes.io/projected/d759f7d3-5701-4d72-9df3-2509819d80f2-kube-api-access-7jvkx\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.339714 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-combined-ca-bundle\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.340215 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-fernet-keys\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.340368 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-config-data\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.443821 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-fernet-keys\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.443928 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-config-data\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.444078 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jvkx\" (UniqueName: \"kubernetes.io/projected/d759f7d3-5701-4d72-9df3-2509819d80f2-kube-api-access-7jvkx\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.444131 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-combined-ca-bundle\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.452230 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-combined-ca-bundle\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.452908 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-fernet-keys\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.457963 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-config-data\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.467843 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jvkx\" (UniqueName: \"kubernetes.io/projected/d759f7d3-5701-4d72-9df3-2509819d80f2-kube-api-access-7jvkx\") pod \"keystone-cron-29401801-dfmbk\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:00 crc kubenswrapper[4910]: I1125 22:01:00.489114 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:01 crc kubenswrapper[4910]: I1125 22:01:01.050464 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29401801-dfmbk"] Nov 25 22:01:01 crc kubenswrapper[4910]: W1125 22:01:01.059025 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd759f7d3_5701_4d72_9df3_2509819d80f2.slice/crio-46e8727c9abdea1b7b2ada6a251aab5cfa0d3a53130b8ee205b8114db78c3709 WatchSource:0}: Error finding container 46e8727c9abdea1b7b2ada6a251aab5cfa0d3a53130b8ee205b8114db78c3709: Status 404 returned error can't find the container with id 46e8727c9abdea1b7b2ada6a251aab5cfa0d3a53130b8ee205b8114db78c3709 Nov 25 22:01:01 crc kubenswrapper[4910]: I1125 22:01:01.571030 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401801-dfmbk" event={"ID":"d759f7d3-5701-4d72-9df3-2509819d80f2","Type":"ContainerStarted","Data":"547eebc05af0265bd2b2b6ed3dfcc157394af320e7a13fb1ed5a8a120aa9b5b8"} Nov 25 22:01:01 crc kubenswrapper[4910]: I1125 22:01:01.571609 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401801-dfmbk" event={"ID":"d759f7d3-5701-4d72-9df3-2509819d80f2","Type":"ContainerStarted","Data":"46e8727c9abdea1b7b2ada6a251aab5cfa0d3a53130b8ee205b8114db78c3709"} Nov 25 22:01:01 crc kubenswrapper[4910]: I1125 22:01:01.597486 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29401801-dfmbk" podStartSLOduration=1.597456081 podStartE2EDuration="1.597456081s" podCreationTimestamp="2025-11-25 22:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 22:01:01.59403688 +0000 UTC m=+1817.056513222" watchObservedRunningTime="2025-11-25 22:01:01.597456081 +0000 UTC m=+1817.059932403" Nov 25 22:01:03 crc kubenswrapper[4910]: I1125 22:01:03.596859 4910 
generic.go:334] "Generic (PLEG): container finished" podID="d759f7d3-5701-4d72-9df3-2509819d80f2" containerID="547eebc05af0265bd2b2b6ed3dfcc157394af320e7a13fb1ed5a8a120aa9b5b8" exitCode=0 Nov 25 22:01:03 crc kubenswrapper[4910]: I1125 22:01:03.597043 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401801-dfmbk" event={"ID":"d759f7d3-5701-4d72-9df3-2509819d80f2","Type":"ContainerDied","Data":"547eebc05af0265bd2b2b6ed3dfcc157394af320e7a13fb1ed5a8a120aa9b5b8"} Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.026897 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.155829 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7jvkx\" (UniqueName: \"kubernetes.io/projected/d759f7d3-5701-4d72-9df3-2509819d80f2-kube-api-access-7jvkx\") pod \"d759f7d3-5701-4d72-9df3-2509819d80f2\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.156083 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-combined-ca-bundle\") pod \"d759f7d3-5701-4d72-9df3-2509819d80f2\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.156170 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-config-data\") pod \"d759f7d3-5701-4d72-9df3-2509819d80f2\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.156311 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-fernet-keys\") pod \"d759f7d3-5701-4d72-9df3-2509819d80f2\" (UID: \"d759f7d3-5701-4d72-9df3-2509819d80f2\") " Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.164571 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d759f7d3-5701-4d72-9df3-2509819d80f2-kube-api-access-7jvkx" (OuterVolumeSpecName: "kube-api-access-7jvkx") pod "d759f7d3-5701-4d72-9df3-2509819d80f2" (UID: "d759f7d3-5701-4d72-9df3-2509819d80f2"). InnerVolumeSpecName "kube-api-access-7jvkx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.164701 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d759f7d3-5701-4d72-9df3-2509819d80f2" (UID: "d759f7d3-5701-4d72-9df3-2509819d80f2"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.200887 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d759f7d3-5701-4d72-9df3-2509819d80f2" (UID: "d759f7d3-5701-4d72-9df3-2509819d80f2"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.228092 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-config-data" (OuterVolumeSpecName: "config-data") pod "d759f7d3-5701-4d72-9df3-2509819d80f2" (UID: "d759f7d3-5701-4d72-9df3-2509819d80f2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.258980 4910 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.259032 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-config-data\") on node \"crc\" DevicePath \"\"" Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.259042 4910 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d759f7d3-5701-4d72-9df3-2509819d80f2-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.259054 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7jvkx\" (UniqueName: \"kubernetes.io/projected/d759f7d3-5701-4d72-9df3-2509819d80f2-kube-api-access-7jvkx\") on node \"crc\" DevicePath \"\"" Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.623052 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29401801-dfmbk" event={"ID":"d759f7d3-5701-4d72-9df3-2509819d80f2","Type":"ContainerDied","Data":"46e8727c9abdea1b7b2ada6a251aab5cfa0d3a53130b8ee205b8114db78c3709"} Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.623436 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="46e8727c9abdea1b7b2ada6a251aab5cfa0d3a53130b8ee205b8114db78c3709" Nov 25 22:01:05 crc kubenswrapper[4910]: I1125 22:01:05.623186 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29401801-dfmbk" Nov 25 22:01:08 crc kubenswrapper[4910]: I1125 22:01:08.658338 4910 generic.go:334] "Generic (PLEG): container finished" podID="77fc796b-aaee-4dad-a82d-464aaf60ab47" containerID="7cf3e6c46b0a42e8c46178ae05a43d0f6ac9ae571f94e81dd2c8af3d966557f0" exitCode=0 Nov 25 22:01:08 crc kubenswrapper[4910]: I1125 22:01:08.658431 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" event={"ID":"77fc796b-aaee-4dad-a82d-464aaf60ab47","Type":"ContainerDied","Data":"7cf3e6c46b0a42e8c46178ae05a43d0f6ac9ae571f94e81dd2c8af3d966557f0"} Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.249575 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.291614 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8nsgb\" (UniqueName: \"kubernetes.io/projected/77fc796b-aaee-4dad-a82d-464aaf60ab47-kube-api-access-8nsgb\") pod \"77fc796b-aaee-4dad-a82d-464aaf60ab47\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.291685 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-inventory\") pod \"77fc796b-aaee-4dad-a82d-464aaf60ab47\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.291793 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-ssh-key\") pod \"77fc796b-aaee-4dad-a82d-464aaf60ab47\" (UID: \"77fc796b-aaee-4dad-a82d-464aaf60ab47\") " Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.303684 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77fc796b-aaee-4dad-a82d-464aaf60ab47-kube-api-access-8nsgb" (OuterVolumeSpecName: "kube-api-access-8nsgb") pod "77fc796b-aaee-4dad-a82d-464aaf60ab47" (UID: "77fc796b-aaee-4dad-a82d-464aaf60ab47"). InnerVolumeSpecName "kube-api-access-8nsgb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.336463 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "77fc796b-aaee-4dad-a82d-464aaf60ab47" (UID: "77fc796b-aaee-4dad-a82d-464aaf60ab47"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.349506 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-inventory" (OuterVolumeSpecName: "inventory") pod "77fc796b-aaee-4dad-a82d-464aaf60ab47" (UID: "77fc796b-aaee-4dad-a82d-464aaf60ab47"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.394395 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.394451 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8nsgb\" (UniqueName: \"kubernetes.io/projected/77fc796b-aaee-4dad-a82d-464aaf60ab47-kube-api-access-8nsgb\") on node \"crc\" DevicePath \"\"" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.394476 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/77fc796b-aaee-4dad-a82d-464aaf60ab47-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.682503 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" event={"ID":"77fc796b-aaee-4dad-a82d-464aaf60ab47","Type":"ContainerDied","Data":"52a02908a3c964fce256caee25f8f71a76fe5bab1f330e4fc58bc0d761c42467"} Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.682560 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52a02908a3c964fce256caee25f8f71a76fe5bab1f330e4fc58bc0d761c42467" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.682679 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-t7clg" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.803090 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp"] Nov 25 22:01:10 crc kubenswrapper[4910]: E1125 22:01:10.803643 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d759f7d3-5701-4d72-9df3-2509819d80f2" containerName="keystone-cron" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.803687 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d759f7d3-5701-4d72-9df3-2509819d80f2" containerName="keystone-cron" Nov 25 22:01:10 crc kubenswrapper[4910]: E1125 22:01:10.803753 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77fc796b-aaee-4dad-a82d-464aaf60ab47" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.803765 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="77fc796b-aaee-4dad-a82d-464aaf60ab47" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.803996 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d759f7d3-5701-4d72-9df3-2509819d80f2" containerName="keystone-cron" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.804035 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="77fc796b-aaee-4dad-a82d-464aaf60ab47" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.805038 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.809608 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.809967 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.810077 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.810205 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.831731 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp"] Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.907585 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp\" (UID: \"c552b066-9a2f-46d0-9865-adaa8c454811\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.907942 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp\" (UID: \"c552b066-9a2f-46d0-9865-adaa8c454811\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:10 crc kubenswrapper[4910]: I1125 22:01:10.908175 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6frl\" (UniqueName: \"kubernetes.io/projected/c552b066-9a2f-46d0-9865-adaa8c454811-kube-api-access-w6frl\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp\" (UID: \"c552b066-9a2f-46d0-9865-adaa8c454811\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:11 crc kubenswrapper[4910]: I1125 22:01:11.010514 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp\" (UID: \"c552b066-9a2f-46d0-9865-adaa8c454811\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:11 crc kubenswrapper[4910]: I1125 22:01:11.012343 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp\" (UID: \"c552b066-9a2f-46d0-9865-adaa8c454811\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:11 crc kubenswrapper[4910]: I1125 22:01:11.012609 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6frl\" (UniqueName: \"kubernetes.io/projected/c552b066-9a2f-46d0-9865-adaa8c454811-kube-api-access-w6frl\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp\" (UID: 
\"c552b066-9a2f-46d0-9865-adaa8c454811\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:11 crc kubenswrapper[4910]: I1125 22:01:11.018038 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp\" (UID: \"c552b066-9a2f-46d0-9865-adaa8c454811\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:11 crc kubenswrapper[4910]: I1125 22:01:11.024674 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp\" (UID: \"c552b066-9a2f-46d0-9865-adaa8c454811\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:11 crc kubenswrapper[4910]: I1125 22:01:11.032889 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6frl\" (UniqueName: \"kubernetes.io/projected/c552b066-9a2f-46d0-9865-adaa8c454811-kube-api-access-w6frl\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp\" (UID: \"c552b066-9a2f-46d0-9865-adaa8c454811\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:11 crc kubenswrapper[4910]: I1125 22:01:11.144759 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:11 crc kubenswrapper[4910]: W1125 22:01:11.546774 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc552b066_9a2f_46d0_9865_adaa8c454811.slice/crio-432ff12a489a1088d7f8a21fd6de74489393009007dee6d5e9fc2dd82866cc59 WatchSource:0}: Error finding container 432ff12a489a1088d7f8a21fd6de74489393009007dee6d5e9fc2dd82866cc59: Status 404 returned error can't find the container with id 432ff12a489a1088d7f8a21fd6de74489393009007dee6d5e9fc2dd82866cc59 Nov 25 22:01:11 crc kubenswrapper[4910]: I1125 22:01:11.546987 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp"] Nov 25 22:01:11 crc kubenswrapper[4910]: I1125 22:01:11.695925 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" event={"ID":"c552b066-9a2f-46d0-9865-adaa8c454811","Type":"ContainerStarted","Data":"432ff12a489a1088d7f8a21fd6de74489393009007dee6d5e9fc2dd82866cc59"} Nov 25 22:01:12 crc kubenswrapper[4910]: I1125 22:01:12.707450 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" event={"ID":"c552b066-9a2f-46d0-9865-adaa8c454811","Type":"ContainerStarted","Data":"1da8ffc66ae7502217b61e88c12797d522a4030361c62ce6ab256d06163ddc97"} Nov 25 22:01:12 crc kubenswrapper[4910]: I1125 22:01:12.726860 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" podStartSLOduration=2.188007597 podStartE2EDuration="2.726840085s" podCreationTimestamp="2025-11-25 22:01:10 +0000 UTC" firstStartedPulling="2025-11-25 22:01:11.550993582 +0000 UTC m=+1827.013469934" lastFinishedPulling="2025-11-25 22:01:12.08982609 +0000 UTC m=+1827.552302422" observedRunningTime="2025-11-25 22:01:12.724911894 +0000 UTC m=+1828.187388226" 
watchObservedRunningTime="2025-11-25 22:01:12.726840085 +0000 UTC m=+1828.189316427" Nov 25 22:01:22 crc kubenswrapper[4910]: I1125 22:01:22.830322 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" event={"ID":"c552b066-9a2f-46d0-9865-adaa8c454811","Type":"ContainerDied","Data":"1da8ffc66ae7502217b61e88c12797d522a4030361c62ce6ab256d06163ddc97"} Nov 25 22:01:22 crc kubenswrapper[4910]: I1125 22:01:22.830212 4910 generic.go:334] "Generic (PLEG): container finished" podID="c552b066-9a2f-46d0-9865-adaa8c454811" containerID="1da8ffc66ae7502217b61e88c12797d522a4030361c62ce6ab256d06163ddc97" exitCode=0 Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.347718 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.535203 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6frl\" (UniqueName: \"kubernetes.io/projected/c552b066-9a2f-46d0-9865-adaa8c454811-kube-api-access-w6frl\") pod \"c552b066-9a2f-46d0-9865-adaa8c454811\" (UID: \"c552b066-9a2f-46d0-9865-adaa8c454811\") " Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.535342 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-ssh-key\") pod \"c552b066-9a2f-46d0-9865-adaa8c454811\" (UID: \"c552b066-9a2f-46d0-9865-adaa8c454811\") " Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.535405 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-inventory\") pod \"c552b066-9a2f-46d0-9865-adaa8c454811\" (UID: \"c552b066-9a2f-46d0-9865-adaa8c454811\") " Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.544533 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c552b066-9a2f-46d0-9865-adaa8c454811-kube-api-access-w6frl" (OuterVolumeSpecName: "kube-api-access-w6frl") pod "c552b066-9a2f-46d0-9865-adaa8c454811" (UID: "c552b066-9a2f-46d0-9865-adaa8c454811"). InnerVolumeSpecName "kube-api-access-w6frl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.567647 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c552b066-9a2f-46d0-9865-adaa8c454811" (UID: "c552b066-9a2f-46d0-9865-adaa8c454811"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.591317 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-inventory" (OuterVolumeSpecName: "inventory") pod "c552b066-9a2f-46d0-9865-adaa8c454811" (UID: "c552b066-9a2f-46d0-9865-adaa8c454811"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.638950 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6frl\" (UniqueName: \"kubernetes.io/projected/c552b066-9a2f-46d0-9865-adaa8c454811-kube-api-access-w6frl\") on node \"crc\" DevicePath \"\"" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.639005 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.639017 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/c552b066-9a2f-46d0-9865-adaa8c454811-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.860397 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" event={"ID":"c552b066-9a2f-46d0-9865-adaa8c454811","Type":"ContainerDied","Data":"432ff12a489a1088d7f8a21fd6de74489393009007dee6d5e9fc2dd82866cc59"} Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.860999 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="432ff12a489a1088d7f8a21fd6de74489393009007dee6d5e9fc2dd82866cc59" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.860469 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.962859 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv"] Nov 25 22:01:24 crc kubenswrapper[4910]: E1125 22:01:24.963464 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c552b066-9a2f-46d0-9865-adaa8c454811" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.963491 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c552b066-9a2f-46d0-9865-adaa8c454811" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.963786 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c552b066-9a2f-46d0-9865-adaa8c454811" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.964809 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.968175 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.968872 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.969182 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.969769 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.969920 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.970330 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.980099 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.980639 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 22:01:24 crc kubenswrapper[4910]: I1125 22:01:24.999320 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv"] Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.050167 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n2hdf\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-kube-api-access-n2hdf\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.050269 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.050305 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.050347 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.050383 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.050416 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.050701 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.050846 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.051109 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.051192 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.051366 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ssh-key\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.051440 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.051595 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.051750 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.153993 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n2hdf\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-kube-api-access-n2hdf\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154082 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154113 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154164 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154205 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154265 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154335 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154365 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154415 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154450 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154484 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154513 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154561 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.154613 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.161842 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.162037 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.162312 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.162347 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.164594 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") 
" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.164778 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.165763 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.166233 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.166237 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.166818 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.167179 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.170205 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.179136 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n2hdf\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-kube-api-access-n2hdf\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.186098 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.293124 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:01:25 crc kubenswrapper[4910]: I1125 22:01:25.922119 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv"] Nov 25 22:01:26 crc kubenswrapper[4910]: I1125 22:01:26.891989 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" event={"ID":"ef782e7e-3e6a-41ed-a9b1-343be0faecc3","Type":"ContainerStarted","Data":"8b401664af77c7c0a9e98ede8512a8c8bcb62955c44c22716ac1942cc1161c86"} Nov 25 22:01:26 crc kubenswrapper[4910]: I1125 22:01:26.892540 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" event={"ID":"ef782e7e-3e6a-41ed-a9b1-343be0faecc3","Type":"ContainerStarted","Data":"79961bad81dc26309e6934226a2034da1a246d0b00b356943569211b411ac6e8"} Nov 25 22:01:26 crc kubenswrapper[4910]: I1125 22:01:26.928576 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" podStartSLOduration=2.482931125 podStartE2EDuration="2.928543899s" podCreationTimestamp="2025-11-25 22:01:24 +0000 UTC" firstStartedPulling="2025-11-25 22:01:25.923672802 +0000 UTC m=+1841.386149134" lastFinishedPulling="2025-11-25 22:01:26.369285546 +0000 UTC m=+1841.831761908" observedRunningTime="2025-11-25 22:01:26.924208994 +0000 UTC m=+1842.386685396" watchObservedRunningTime="2025-11-25 22:01:26.928543899 +0000 UTC m=+1842.391020261" Nov 25 22:02:08 crc kubenswrapper[4910]: I1125 22:02:08.361591 4910 generic.go:334] "Generic (PLEG): container finished" podID="ef782e7e-3e6a-41ed-a9b1-343be0faecc3" containerID="8b401664af77c7c0a9e98ede8512a8c8bcb62955c44c22716ac1942cc1161c86" exitCode=0 Nov 25 22:02:08 crc kubenswrapper[4910]: I1125 22:02:08.361717 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" event={"ID":"ef782e7e-3e6a-41ed-a9b1-343be0faecc3","Type":"ContainerDied","Data":"8b401664af77c7c0a9e98ede8512a8c8bcb62955c44c22716ac1942cc1161c86"} Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.863832 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.924858 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-libvirt-combined-ca-bundle\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925294 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ssh-key\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925347 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-bootstrap-combined-ca-bundle\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925375 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ovn-combined-ca-bundle\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925508 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-inventory\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925547 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-nova-combined-ca-bundle\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925618 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925639 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n2hdf\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-kube-api-access-n2hdf\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925727 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-neutron-metadata-combined-ca-bundle\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925778 4910 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-repo-setup-combined-ca-bundle\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925806 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925822 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-telemetry-combined-ca-bundle\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925842 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-ovn-default-certs-0\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.925865 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\" (UID: \"ef782e7e-3e6a-41ed-a9b1-343be0faecc3\") " Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.933516 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.934424 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.934567 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.935398 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.937416 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.939334 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.939494 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.939837 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.939866 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.944762 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-kube-api-access-n2hdf" (OuterVolumeSpecName: "kube-api-access-n2hdf") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "kube-api-access-n2hdf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.945365 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.945866 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.964114 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:02:09 crc kubenswrapper[4910]: I1125 22:02:09.971621 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-inventory" (OuterVolumeSpecName: "inventory") pod "ef782e7e-3e6a-41ed-a9b1-343be0faecc3" (UID: "ef782e7e-3e6a-41ed-a9b1-343be0faecc3"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.028942 4910 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029006 4910 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029025 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029036 4910 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029047 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029057 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029068 4910 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029080 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029089 4910 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029099 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029109 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029117 4910 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-nova-combined-ca-bundle\") 
on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029129 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.029138 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n2hdf\" (UniqueName: \"kubernetes.io/projected/ef782e7e-3e6a-41ed-a9b1-343be0faecc3-kube-api-access-n2hdf\") on node \"crc\" DevicePath \"\"" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.387125 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" event={"ID":"ef782e7e-3e6a-41ed-a9b1-343be0faecc3","Type":"ContainerDied","Data":"79961bad81dc26309e6934226a2034da1a246d0b00b356943569211b411ac6e8"} Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.387197 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="79961bad81dc26309e6934226a2034da1a246d0b00b356943569211b411ac6e8" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.387305 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.542021 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv"] Nov 25 22:02:10 crc kubenswrapper[4910]: E1125 22:02:10.543378 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef782e7e-3e6a-41ed-a9b1-343be0faecc3" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.543398 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef782e7e-3e6a-41ed-a9b1-343be0faecc3" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.543630 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef782e7e-3e6a-41ed-a9b1-343be0faecc3" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.544729 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.551810 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.552101 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.552656 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.552789 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.555150 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.569491 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv"] Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.642840 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.643301 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.643411 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7b96\" (UniqueName: \"kubernetes.io/projected/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-kube-api-access-m7b96\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.643554 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.643872 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.745938 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" 
(UniqueName: \"kubernetes.io/configmap/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.746070 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.746119 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7b96\" (UniqueName: \"kubernetes.io/projected/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-kube-api-access-m7b96\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.746192 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.746270 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.747401 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.753612 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.753969 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.754998 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.768205 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7b96\" (UniqueName: \"kubernetes.io/projected/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-kube-api-access-m7b96\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-p7pqv\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:10 crc kubenswrapper[4910]: I1125 22:02:10.875960 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:02:11 crc kubenswrapper[4910]: I1125 22:02:11.472630 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv"] Nov 25 22:02:12 crc kubenswrapper[4910]: I1125 22:02:12.422556 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" event={"ID":"0ab610e6-8ab3-4c4c-83fa-5ce52795f545","Type":"ContainerStarted","Data":"aa2793feb9bd51a1e6adc06d41fffb3a2dda863aa95a1a1632b1b6e1ec40849b"} Nov 25 22:02:13 crc kubenswrapper[4910]: I1125 22:02:13.437697 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" event={"ID":"0ab610e6-8ab3-4c4c-83fa-5ce52795f545","Type":"ContainerStarted","Data":"1270b7cd8fa9ab5f7f0f8dc1ee9f4b1ede2eee797ea2b144c1026f64ea908653"} Nov 25 22:02:13 crc kubenswrapper[4910]: I1125 22:02:13.487118 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" podStartSLOduration=2.752430235 podStartE2EDuration="3.48708878s" podCreationTimestamp="2025-11-25 22:02:10 +0000 UTC" firstStartedPulling="2025-11-25 22:02:11.48226593 +0000 UTC m=+1886.944742262" lastFinishedPulling="2025-11-25 22:02:12.216924455 +0000 UTC m=+1887.679400807" observedRunningTime="2025-11-25 22:02:13.473424523 +0000 UTC m=+1888.935900875" watchObservedRunningTime="2025-11-25 22:02:13.48708878 +0000 UTC m=+1888.949565142" Nov 25 22:02:53 crc kubenswrapper[4910]: I1125 22:02:53.098758 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:02:53 crc kubenswrapper[4910]: I1125 22:02:53.099853 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:03:22 crc kubenswrapper[4910]: I1125 22:03:22.233140 4910 generic.go:334] "Generic (PLEG): container finished" podID="0ab610e6-8ab3-4c4c-83fa-5ce52795f545" containerID="1270b7cd8fa9ab5f7f0f8dc1ee9f4b1ede2eee797ea2b144c1026f64ea908653" exitCode=0 Nov 25 22:03:22 crc kubenswrapper[4910]: I1125 22:03:22.233321 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" event={"ID":"0ab610e6-8ab3-4c4c-83fa-5ce52795f545","Type":"ContainerDied","Data":"1270b7cd8fa9ab5f7f0f8dc1ee9f4b1ede2eee797ea2b144c1026f64ea908653"} Nov 25 22:03:23 crc kubenswrapper[4910]: 
I1125 22:03:23.098738 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.099472 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.730534 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.848359 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovncontroller-config-0\") pod \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.848549 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-inventory\") pod \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.848633 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ssh-key\") pod \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.849573 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7b96\" (UniqueName: \"kubernetes.io/projected/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-kube-api-access-m7b96\") pod \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.849678 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovn-combined-ca-bundle\") pod \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\" (UID: \"0ab610e6-8ab3-4c4c-83fa-5ce52795f545\") " Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.856672 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-kube-api-access-m7b96" (OuterVolumeSpecName: "kube-api-access-m7b96") pod "0ab610e6-8ab3-4c4c-83fa-5ce52795f545" (UID: "0ab610e6-8ab3-4c4c-83fa-5ce52795f545"). InnerVolumeSpecName "kube-api-access-m7b96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.863791 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "0ab610e6-8ab3-4c4c-83fa-5ce52795f545" (UID: "0ab610e6-8ab3-4c4c-83fa-5ce52795f545"). 
InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.882998 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-inventory" (OuterVolumeSpecName: "inventory") pod "0ab610e6-8ab3-4c4c-83fa-5ce52795f545" (UID: "0ab610e6-8ab3-4c4c-83fa-5ce52795f545"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.887659 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "0ab610e6-8ab3-4c4c-83fa-5ce52795f545" (UID: "0ab610e6-8ab3-4c4c-83fa-5ce52795f545"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.909496 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0ab610e6-8ab3-4c4c-83fa-5ce52795f545" (UID: "0ab610e6-8ab3-4c4c-83fa-5ce52795f545"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.952537 4910 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.952578 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.952588 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.952603 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7b96\" (UniqueName: \"kubernetes.io/projected/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-kube-api-access-m7b96\") on node \"crc\" DevicePath \"\"" Nov 25 22:03:23 crc kubenswrapper[4910]: I1125 22:03:23.952613 4910 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0ab610e6-8ab3-4c4c-83fa-5ce52795f545-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.271490 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" event={"ID":"0ab610e6-8ab3-4c4c-83fa-5ce52795f545","Type":"ContainerDied","Data":"aa2793feb9bd51a1e6adc06d41fffb3a2dda863aa95a1a1632b1b6e1ec40849b"} Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.271561 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aa2793feb9bd51a1e6adc06d41fffb3a2dda863aa95a1a1632b1b6e1ec40849b" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.271587 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-p7pqv" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.379071 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj"] Nov 25 22:03:24 crc kubenswrapper[4910]: E1125 22:03:24.379592 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0ab610e6-8ab3-4c4c-83fa-5ce52795f545" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.379613 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0ab610e6-8ab3-4c4c-83fa-5ce52795f545" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.379826 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="0ab610e6-8ab3-4c4c-83fa-5ce52795f545" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.380748 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.383653 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.384063 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.384206 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.384353 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.384771 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.386335 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.398453 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj"] Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.566677 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.566759 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pp2gx\" (UniqueName: \"kubernetes.io/projected/ed6d4c0f-684e-4174-ad4c-5f034025d52a-kube-api-access-pp2gx\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.566802 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.566834 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.566870 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.567417 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.670542 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.670664 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pp2gx\" (UniqueName: \"kubernetes.io/projected/ed6d4c0f-684e-4174-ad4c-5f034025d52a-kube-api-access-pp2gx\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.670727 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.670777 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " 
pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.670841 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.670947 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.678396 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.678541 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.679933 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.681640 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.685129 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.706627 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pp2gx\" (UniqueName: 
\"kubernetes.io/projected/ed6d4c0f-684e-4174-ad4c-5f034025d52a-kube-api-access-pp2gx\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:24 crc kubenswrapper[4910]: I1125 22:03:24.711490 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:03:25 crc kubenswrapper[4910]: I1125 22:03:25.397470 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj"] Nov 25 22:03:26 crc kubenswrapper[4910]: I1125 22:03:26.316522 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" event={"ID":"ed6d4c0f-684e-4174-ad4c-5f034025d52a","Type":"ContainerStarted","Data":"f518777da8bbe796d57e36df54cceebd449ffb78050bbcee657377f025f20b8c"} Nov 25 22:03:26 crc kubenswrapper[4910]: I1125 22:03:26.316572 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" event={"ID":"ed6d4c0f-684e-4174-ad4c-5f034025d52a","Type":"ContainerStarted","Data":"670e1385fd3dfe39d6795bb439361a75f93e7a2185be896f1234e907e5f8e5de"} Nov 25 22:03:26 crc kubenswrapper[4910]: I1125 22:03:26.341771 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" podStartSLOduration=1.845962796 podStartE2EDuration="2.341750788s" podCreationTimestamp="2025-11-25 22:03:24 +0000 UTC" firstStartedPulling="2025-11-25 22:03:25.411827372 +0000 UTC m=+1960.874303704" lastFinishedPulling="2025-11-25 22:03:25.907615364 +0000 UTC m=+1961.370091696" observedRunningTime="2025-11-25 22:03:26.336445825 +0000 UTC m=+1961.798922147" watchObservedRunningTime="2025-11-25 22:03:26.341750788 +0000 UTC m=+1961.804227110" Nov 25 22:03:53 crc kubenswrapper[4910]: I1125 22:03:53.098989 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:03:53 crc kubenswrapper[4910]: I1125 22:03:53.099776 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:03:53 crc kubenswrapper[4910]: I1125 22:03:53.099898 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 22:03:53 crc kubenswrapper[4910]: I1125 22:03:53.101399 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"61d77b28ec593a2c9c5098bb7f50771fc12cbdca15009996847ff51c55bf8549"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 22:03:53 crc kubenswrapper[4910]: I1125 22:03:53.101514 4910 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://61d77b28ec593a2c9c5098bb7f50771fc12cbdca15009996847ff51c55bf8549" gracePeriod=600 Nov 25 22:03:53 crc kubenswrapper[4910]: I1125 22:03:53.668362 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="61d77b28ec593a2c9c5098bb7f50771fc12cbdca15009996847ff51c55bf8549" exitCode=0 Nov 25 22:03:53 crc kubenswrapper[4910]: I1125 22:03:53.668418 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"61d77b28ec593a2c9c5098bb7f50771fc12cbdca15009996847ff51c55bf8549"} Nov 25 22:03:53 crc kubenswrapper[4910]: I1125 22:03:53.669322 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255"} Nov 25 22:03:53 crc kubenswrapper[4910]: I1125 22:03:53.669353 4910 scope.go:117] "RemoveContainer" containerID="c38186254182b8d4648fd2ab674cd3b7262f4e82496b5e86af8a421a22bf8c8a" Nov 25 22:04:20 crc kubenswrapper[4910]: I1125 22:04:20.046316 4910 generic.go:334] "Generic (PLEG): container finished" podID="ed6d4c0f-684e-4174-ad4c-5f034025d52a" containerID="f518777da8bbe796d57e36df54cceebd449ffb78050bbcee657377f025f20b8c" exitCode=0 Nov 25 22:04:20 crc kubenswrapper[4910]: I1125 22:04:20.046402 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" event={"ID":"ed6d4c0f-684e-4174-ad4c-5f034025d52a","Type":"ContainerDied","Data":"f518777da8bbe796d57e36df54cceebd449ffb78050bbcee657377f025f20b8c"} Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.541466 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.708691 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pp2gx\" (UniqueName: \"kubernetes.io/projected/ed6d4c0f-684e-4174-ad4c-5f034025d52a-kube-api-access-pp2gx\") pod \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.708794 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-metadata-combined-ca-bundle\") pod \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.708865 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-ovn-metadata-agent-neutron-config-0\") pod \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.709054 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-nova-metadata-neutron-config-0\") pod \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.709163 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-inventory\") pod \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.709200 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-ssh-key\") pod \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\" (UID: \"ed6d4c0f-684e-4174-ad4c-5f034025d52a\") " Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.717754 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "ed6d4c0f-684e-4174-ad4c-5f034025d52a" (UID: "ed6d4c0f-684e-4174-ad4c-5f034025d52a"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.722186 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed6d4c0f-684e-4174-ad4c-5f034025d52a-kube-api-access-pp2gx" (OuterVolumeSpecName: "kube-api-access-pp2gx") pod "ed6d4c0f-684e-4174-ad4c-5f034025d52a" (UID: "ed6d4c0f-684e-4174-ad4c-5f034025d52a"). InnerVolumeSpecName "kube-api-access-pp2gx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.745665 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-inventory" (OuterVolumeSpecName: "inventory") pod "ed6d4c0f-684e-4174-ad4c-5f034025d52a" (UID: "ed6d4c0f-684e-4174-ad4c-5f034025d52a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.749914 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "ed6d4c0f-684e-4174-ad4c-5f034025d52a" (UID: "ed6d4c0f-684e-4174-ad4c-5f034025d52a"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.750192 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ed6d4c0f-684e-4174-ad4c-5f034025d52a" (UID: "ed6d4c0f-684e-4174-ad4c-5f034025d52a"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.766974 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "ed6d4c0f-684e-4174-ad4c-5f034025d52a" (UID: "ed6d4c0f-684e-4174-ad4c-5f034025d52a"). InnerVolumeSpecName "nova-metadata-neutron-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.811835 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pp2gx\" (UniqueName: \"kubernetes.io/projected/ed6d4c0f-684e-4174-ad4c-5f034025d52a-kube-api-access-pp2gx\") on node \"crc\" DevicePath \"\"" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.811871 4910 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.811885 4910 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.811900 4910 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.811911 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 22:04:21 crc kubenswrapper[4910]: I1125 22:04:21.811923 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ed6d4c0f-684e-4174-ad4c-5f034025d52a-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.078419 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" event={"ID":"ed6d4c0f-684e-4174-ad4c-5f034025d52a","Type":"ContainerDied","Data":"670e1385fd3dfe39d6795bb439361a75f93e7a2185be896f1234e907e5f8e5de"} Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.079076 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="670e1385fd3dfe39d6795bb439361a75f93e7a2185be896f1234e907e5f8e5de" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.078471 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.339239 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk"] Nov 25 22:04:22 crc kubenswrapper[4910]: E1125 22:04:22.339789 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed6d4c0f-684e-4174-ad4c-5f034025d52a" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.339812 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed6d4c0f-684e-4174-ad4c-5f034025d52a" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.340033 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed6d4c0f-684e-4174-ad4c-5f034025d52a" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.340840 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.343557 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.343701 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.344339 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.345435 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.347080 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.357396 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk"] Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.430608 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.430687 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mtsnw\" (UniqueName: \"kubernetes.io/projected/1132a133-2fdf-4a87-b132-d1f1c0a26c76-kube-api-access-mtsnw\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.430755 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.430849 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.430889 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.533465 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.533565 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.533605 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.533736 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.533761 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mtsnw\" (UniqueName: \"kubernetes.io/projected/1132a133-2fdf-4a87-b132-d1f1c0a26c76-kube-api-access-mtsnw\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.542323 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.550533 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.550872 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.551547 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-combined-ca-bundle\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.559102 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mtsnw\" (UniqueName: \"kubernetes.io/projected/1132a133-2fdf-4a87-b132-d1f1c0a26c76-kube-api-access-mtsnw\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-h78fk\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:22 crc kubenswrapper[4910]: I1125 22:04:22.656589 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" Nov 25 22:04:23 crc kubenswrapper[4910]: I1125 22:04:23.077359 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk"] Nov 25 22:04:23 crc kubenswrapper[4910]: W1125 22:04:23.081721 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1132a133_2fdf_4a87_b132_d1f1c0a26c76.slice/crio-b006ee690cc86fa62ba23a97df1fec58610ec977d5fcc824a7c3f138fc7bc1ee WatchSource:0}: Error finding container b006ee690cc86fa62ba23a97df1fec58610ec977d5fcc824a7c3f138fc7bc1ee: Status 404 returned error can't find the container with id b006ee690cc86fa62ba23a97df1fec58610ec977d5fcc824a7c3f138fc7bc1ee Nov 25 22:04:24 crc kubenswrapper[4910]: I1125 22:04:24.103563 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" event={"ID":"1132a133-2fdf-4a87-b132-d1f1c0a26c76","Type":"ContainerStarted","Data":"975368ba24bac4df5a2b889d5769ac4a72b262288d6831ecd046dd3fcbec6a82"} Nov 25 22:04:24 crc kubenswrapper[4910]: I1125 22:04:24.103936 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" event={"ID":"1132a133-2fdf-4a87-b132-d1f1c0a26c76","Type":"ContainerStarted","Data":"b006ee690cc86fa62ba23a97df1fec58610ec977d5fcc824a7c3f138fc7bc1ee"} Nov 25 22:04:24 crc kubenswrapper[4910]: I1125 22:04:24.142606 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" podStartSLOduration=1.604340691 podStartE2EDuration="2.142572784s" podCreationTimestamp="2025-11-25 22:04:22 +0000 UTC" firstStartedPulling="2025-11-25 22:04:23.084473753 +0000 UTC m=+2018.546950075" lastFinishedPulling="2025-11-25 22:04:23.622705846 +0000 UTC m=+2019.085182168" observedRunningTime="2025-11-25 22:04:24.137528879 +0000 UTC m=+2019.600005251" watchObservedRunningTime="2025-11-25 22:04:24.142572784 +0000 UTC m=+2019.605049146" Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.173996 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-8vl9p"] Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.178627 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.195362 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8vl9p"] Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.238040 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-catalog-content\") pod \"certified-operators-8vl9p\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.238136 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-utilities\") pod \"certified-operators-8vl9p\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.238625 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42zw6\" (UniqueName: \"kubernetes.io/projected/5f3312dd-22cc-448e-a10d-675dae293aef-kube-api-access-42zw6\") pod \"certified-operators-8vl9p\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.341026 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-catalog-content\") pod \"certified-operators-8vl9p\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.341130 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-utilities\") pod \"certified-operators-8vl9p\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.341287 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42zw6\" (UniqueName: \"kubernetes.io/projected/5f3312dd-22cc-448e-a10d-675dae293aef-kube-api-access-42zw6\") pod \"certified-operators-8vl9p\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.341825 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-utilities\") pod \"certified-operators-8vl9p\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.342596 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-catalog-content\") pod \"certified-operators-8vl9p\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.380779 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-42zw6\" (UniqueName: \"kubernetes.io/projected/5f3312dd-22cc-448e-a10d-675dae293aef-kube-api-access-42zw6\") pod \"certified-operators-8vl9p\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:42 crc kubenswrapper[4910]: I1125 22:05:42.508356 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:43 crc kubenswrapper[4910]: I1125 22:05:43.017557 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-8vl9p"] Nov 25 22:05:43 crc kubenswrapper[4910]: I1125 22:05:43.094565 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8vl9p" event={"ID":"5f3312dd-22cc-448e-a10d-675dae293aef","Type":"ContainerStarted","Data":"c58b38f1246971011bda3288c8483cb80ca7f34bb10bcd41f08603b9e50c3b96"} Nov 25 22:05:44 crc kubenswrapper[4910]: I1125 22:05:44.113872 4910 generic.go:334] "Generic (PLEG): container finished" podID="5f3312dd-22cc-448e-a10d-675dae293aef" containerID="c8c158394891c96c8021ea723e0611d83c24a6950be6f8c7a1009087252619d8" exitCode=0 Nov 25 22:05:44 crc kubenswrapper[4910]: I1125 22:05:44.114576 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8vl9p" event={"ID":"5f3312dd-22cc-448e-a10d-675dae293aef","Type":"ContainerDied","Data":"c8c158394891c96c8021ea723e0611d83c24a6950be6f8c7a1009087252619d8"} Nov 25 22:05:46 crc kubenswrapper[4910]: I1125 22:05:46.152393 4910 generic.go:334] "Generic (PLEG): container finished" podID="5f3312dd-22cc-448e-a10d-675dae293aef" containerID="01785b9d866e9348457ba6e1b430d9247f07ab4f49cd486b32624a642ee3e4f0" exitCode=0 Nov 25 22:05:46 crc kubenswrapper[4910]: I1125 22:05:46.152488 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8vl9p" event={"ID":"5f3312dd-22cc-448e-a10d-675dae293aef","Type":"ContainerDied","Data":"01785b9d866e9348457ba6e1b430d9247f07ab4f49cd486b32624a642ee3e4f0"} Nov 25 22:05:47 crc kubenswrapper[4910]: I1125 22:05:47.167460 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8vl9p" event={"ID":"5f3312dd-22cc-448e-a10d-675dae293aef","Type":"ContainerStarted","Data":"0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897"} Nov 25 22:05:47 crc kubenswrapper[4910]: I1125 22:05:47.209224 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-8vl9p" podStartSLOduration=2.753211898 podStartE2EDuration="5.209193119s" podCreationTimestamp="2025-11-25 22:05:42 +0000 UTC" firstStartedPulling="2025-11-25 22:05:44.118615173 +0000 UTC m=+2099.581091495" lastFinishedPulling="2025-11-25 22:05:46.574596394 +0000 UTC m=+2102.037072716" observedRunningTime="2025-11-25 22:05:47.201655966 +0000 UTC m=+2102.664132328" watchObservedRunningTime="2025-11-25 22:05:47.209193119 +0000 UTC m=+2102.671669441" Nov 25 22:05:52 crc kubenswrapper[4910]: I1125 22:05:52.508977 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:52 crc kubenswrapper[4910]: I1125 22:05:52.509459 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:52 crc kubenswrapper[4910]: I1125 22:05:52.559454 4910 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:53 crc kubenswrapper[4910]: I1125 22:05:53.099582 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:05:53 crc kubenswrapper[4910]: I1125 22:05:53.099660 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:05:53 crc kubenswrapper[4910]: I1125 22:05:53.323933 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:53 crc kubenswrapper[4910]: I1125 22:05:53.378962 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8vl9p"] Nov 25 22:05:55 crc kubenswrapper[4910]: I1125 22:05:55.304384 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-8vl9p" podUID="5f3312dd-22cc-448e-a10d-675dae293aef" containerName="registry-server" containerID="cri-o://0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897" gracePeriod=2 Nov 25 22:05:55 crc kubenswrapper[4910]: I1125 22:05:55.897834 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:55 crc kubenswrapper[4910]: I1125 22:05:55.987461 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-catalog-content\") pod \"5f3312dd-22cc-448e-a10d-675dae293aef\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " Nov 25 22:05:55 crc kubenswrapper[4910]: I1125 22:05:55.987575 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42zw6\" (UniqueName: \"kubernetes.io/projected/5f3312dd-22cc-448e-a10d-675dae293aef-kube-api-access-42zw6\") pod \"5f3312dd-22cc-448e-a10d-675dae293aef\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " Nov 25 22:05:55 crc kubenswrapper[4910]: I1125 22:05:55.987707 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-utilities\") pod \"5f3312dd-22cc-448e-a10d-675dae293aef\" (UID: \"5f3312dd-22cc-448e-a10d-675dae293aef\") " Nov 25 22:05:55 crc kubenswrapper[4910]: I1125 22:05:55.989367 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-utilities" (OuterVolumeSpecName: "utilities") pod "5f3312dd-22cc-448e-a10d-675dae293aef" (UID: "5f3312dd-22cc-448e-a10d-675dae293aef"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:05:55 crc kubenswrapper[4910]: I1125 22:05:55.995461 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f3312dd-22cc-448e-a10d-675dae293aef-kube-api-access-42zw6" (OuterVolumeSpecName: "kube-api-access-42zw6") pod "5f3312dd-22cc-448e-a10d-675dae293aef" (UID: "5f3312dd-22cc-448e-a10d-675dae293aef"). InnerVolumeSpecName "kube-api-access-42zw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.091575 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42zw6\" (UniqueName: \"kubernetes.io/projected/5f3312dd-22cc-448e-a10d-675dae293aef-kube-api-access-42zw6\") on node \"crc\" DevicePath \"\"" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.091656 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.144053 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f3312dd-22cc-448e-a10d-675dae293aef" (UID: "5f3312dd-22cc-448e-a10d-675dae293aef"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.193635 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f3312dd-22cc-448e-a10d-675dae293aef-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.318306 4910 generic.go:334] "Generic (PLEG): container finished" podID="5f3312dd-22cc-448e-a10d-675dae293aef" containerID="0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897" exitCode=0 Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.319014 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8vl9p" event={"ID":"5f3312dd-22cc-448e-a10d-675dae293aef","Type":"ContainerDied","Data":"0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897"} Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.319800 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-8vl9p" event={"ID":"5f3312dd-22cc-448e-a10d-675dae293aef","Type":"ContainerDied","Data":"c58b38f1246971011bda3288c8483cb80ca7f34bb10bcd41f08603b9e50c3b96"} Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.319903 4910 scope.go:117] "RemoveContainer" containerID="0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.320444 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-8vl9p" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.356413 4910 scope.go:117] "RemoveContainer" containerID="01785b9d866e9348457ba6e1b430d9247f07ab4f49cd486b32624a642ee3e4f0" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.361512 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-8vl9p"] Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.371693 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-8vl9p"] Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.389120 4910 scope.go:117] "RemoveContainer" containerID="c8c158394891c96c8021ea723e0611d83c24a6950be6f8c7a1009087252619d8" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.426757 4910 scope.go:117] "RemoveContainer" containerID="0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897" Nov 25 22:05:56 crc kubenswrapper[4910]: E1125 22:05:56.427439 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897\": container with ID starting with 0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897 not found: ID does not exist" containerID="0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.427580 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897"} err="failed to get container status \"0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897\": rpc error: code = NotFound desc = could not find container \"0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897\": container with ID starting with 0f8ea4dad2f17b5a5b4a36a413f6f550667ed5a4dc058b6b51e9894f6b1cb897 not found: ID does not exist" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.427695 4910 scope.go:117] "RemoveContainer" containerID="01785b9d866e9348457ba6e1b430d9247f07ab4f49cd486b32624a642ee3e4f0" Nov 25 22:05:56 crc kubenswrapper[4910]: E1125 22:05:56.428266 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01785b9d866e9348457ba6e1b430d9247f07ab4f49cd486b32624a642ee3e4f0\": container with ID starting with 01785b9d866e9348457ba6e1b430d9247f07ab4f49cd486b32624a642ee3e4f0 not found: ID does not exist" containerID="01785b9d866e9348457ba6e1b430d9247f07ab4f49cd486b32624a642ee3e4f0" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.428311 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01785b9d866e9348457ba6e1b430d9247f07ab4f49cd486b32624a642ee3e4f0"} err="failed to get container status \"01785b9d866e9348457ba6e1b430d9247f07ab4f49cd486b32624a642ee3e4f0\": rpc error: code = NotFound desc = could not find container \"01785b9d866e9348457ba6e1b430d9247f07ab4f49cd486b32624a642ee3e4f0\": container with ID starting with 01785b9d866e9348457ba6e1b430d9247f07ab4f49cd486b32624a642ee3e4f0 not found: ID does not exist" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.428366 4910 scope.go:117] "RemoveContainer" containerID="c8c158394891c96c8021ea723e0611d83c24a6950be6f8c7a1009087252619d8" Nov 25 22:05:56 crc kubenswrapper[4910]: E1125 22:05:56.428697 4910 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c8c158394891c96c8021ea723e0611d83c24a6950be6f8c7a1009087252619d8\": container with ID starting with c8c158394891c96c8021ea723e0611d83c24a6950be6f8c7a1009087252619d8 not found: ID does not exist" containerID="c8c158394891c96c8021ea723e0611d83c24a6950be6f8c7a1009087252619d8" Nov 25 22:05:56 crc kubenswrapper[4910]: I1125 22:05:56.428726 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c8c158394891c96c8021ea723e0611d83c24a6950be6f8c7a1009087252619d8"} err="failed to get container status \"c8c158394891c96c8021ea723e0611d83c24a6950be6f8c7a1009087252619d8\": rpc error: code = NotFound desc = could not find container \"c8c158394891c96c8021ea723e0611d83c24a6950be6f8c7a1009087252619d8\": container with ID starting with c8c158394891c96c8021ea723e0611d83c24a6950be6f8c7a1009087252619d8 not found: ID does not exist" Nov 25 22:05:57 crc kubenswrapper[4910]: I1125 22:05:57.228137 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f3312dd-22cc-448e-a10d-675dae293aef" path="/var/lib/kubelet/pods/5f3312dd-22cc-448e-a10d-675dae293aef/volumes" Nov 25 22:06:23 crc kubenswrapper[4910]: I1125 22:06:23.099064 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:06:23 crc kubenswrapper[4910]: I1125 22:06:23.099524 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:06:53 crc kubenswrapper[4910]: I1125 22:06:53.099433 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:06:53 crc kubenswrapper[4910]: I1125 22:06:53.100372 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:06:53 crc kubenswrapper[4910]: I1125 22:06:53.100445 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 22:06:53 crc kubenswrapper[4910]: I1125 22:06:53.101589 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 22:06:53 crc kubenswrapper[4910]: I1125 22:06:53.101660 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" 
podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" gracePeriod=600 Nov 25 22:06:53 crc kubenswrapper[4910]: E1125 22:06:53.263571 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:06:54 crc kubenswrapper[4910]: I1125 22:06:54.007977 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" exitCode=0 Nov 25 22:06:54 crc kubenswrapper[4910]: I1125 22:06:54.008117 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255"} Nov 25 22:06:54 crc kubenswrapper[4910]: I1125 22:06:54.008787 4910 scope.go:117] "RemoveContainer" containerID="61d77b28ec593a2c9c5098bb7f50771fc12cbdca15009996847ff51c55bf8549" Nov 25 22:06:54 crc kubenswrapper[4910]: I1125 22:06:54.010178 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:06:54 crc kubenswrapper[4910]: E1125 22:06:54.010771 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:07:09 crc kubenswrapper[4910]: I1125 22:07:09.204909 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:07:09 crc kubenswrapper[4910]: E1125 22:07:09.206517 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.639261 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-whvtb"] Nov 25 22:07:13 crc kubenswrapper[4910]: E1125 22:07:13.641996 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f3312dd-22cc-448e-a10d-675dae293aef" containerName="extract-content" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.642088 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f3312dd-22cc-448e-a10d-675dae293aef" containerName="extract-content" Nov 25 22:07:13 crc kubenswrapper[4910]: E1125 22:07:13.642166 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f3312dd-22cc-448e-a10d-675dae293aef" 
containerName="extract-utilities" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.642222 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f3312dd-22cc-448e-a10d-675dae293aef" containerName="extract-utilities" Nov 25 22:07:13 crc kubenswrapper[4910]: E1125 22:07:13.642315 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f3312dd-22cc-448e-a10d-675dae293aef" containerName="registry-server" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.642383 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f3312dd-22cc-448e-a10d-675dae293aef" containerName="registry-server" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.642665 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f3312dd-22cc-448e-a10d-675dae293aef" containerName="registry-server" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.645907 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.667355 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-whvtb"] Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.714864 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-catalog-content\") pod \"redhat-operators-whvtb\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.715164 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjq4z\" (UniqueName: \"kubernetes.io/projected/67e61987-dabb-4e50-b8f2-a14acf19df0d-kube-api-access-zjq4z\") pod \"redhat-operators-whvtb\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.715602 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-utilities\") pod \"redhat-operators-whvtb\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.818725 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-catalog-content\") pod \"redhat-operators-whvtb\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.818856 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjq4z\" (UniqueName: \"kubernetes.io/projected/67e61987-dabb-4e50-b8f2-a14acf19df0d-kube-api-access-zjq4z\") pod \"redhat-operators-whvtb\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.818975 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-utilities\") pod \"redhat-operators-whvtb\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " 
pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.819447 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-catalog-content\") pod \"redhat-operators-whvtb\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.819723 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-utilities\") pod \"redhat-operators-whvtb\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.844378 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjq4z\" (UniqueName: \"kubernetes.io/projected/67e61987-dabb-4e50-b8f2-a14acf19df0d-kube-api-access-zjq4z\") pod \"redhat-operators-whvtb\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:13 crc kubenswrapper[4910]: I1125 22:07:13.985438 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:14 crc kubenswrapper[4910]: I1125 22:07:14.603576 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-whvtb"] Nov 25 22:07:15 crc kubenswrapper[4910]: I1125 22:07:15.288119 4910 generic.go:334] "Generic (PLEG): container finished" podID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerID="2e277aec7c1a03ae81e85abe1fb4bd5e904fc26bfb8de36fa5b7096dac4e8172" exitCode=0 Nov 25 22:07:15 crc kubenswrapper[4910]: I1125 22:07:15.288270 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whvtb" event={"ID":"67e61987-dabb-4e50-b8f2-a14acf19df0d","Type":"ContainerDied","Data":"2e277aec7c1a03ae81e85abe1fb4bd5e904fc26bfb8de36fa5b7096dac4e8172"} Nov 25 22:07:15 crc kubenswrapper[4910]: I1125 22:07:15.288694 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whvtb" event={"ID":"67e61987-dabb-4e50-b8f2-a14acf19df0d","Type":"ContainerStarted","Data":"00df674e248a75c762a59589eb5a9c45d6b85dc435547cef39ded55f9d458a0e"} Nov 25 22:07:15 crc kubenswrapper[4910]: I1125 22:07:15.293077 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 22:07:17 crc kubenswrapper[4910]: I1125 22:07:17.320491 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whvtb" event={"ID":"67e61987-dabb-4e50-b8f2-a14acf19df0d","Type":"ContainerStarted","Data":"9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80"} Nov 25 22:07:18 crc kubenswrapper[4910]: I1125 22:07:18.350353 4910 generic.go:334] "Generic (PLEG): container finished" podID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerID="9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80" exitCode=0 Nov 25 22:07:18 crc kubenswrapper[4910]: I1125 22:07:18.350413 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whvtb" event={"ID":"67e61987-dabb-4e50-b8f2-a14acf19df0d","Type":"ContainerDied","Data":"9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80"} Nov 25 22:07:19 crc kubenswrapper[4910]: 
I1125 22:07:19.365792 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whvtb" event={"ID":"67e61987-dabb-4e50-b8f2-a14acf19df0d","Type":"ContainerStarted","Data":"90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217"} Nov 25 22:07:19 crc kubenswrapper[4910]: I1125 22:07:19.405546 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-whvtb" podStartSLOduration=2.840800383 podStartE2EDuration="6.405509728s" podCreationTimestamp="2025-11-25 22:07:13 +0000 UTC" firstStartedPulling="2025-11-25 22:07:15.292707867 +0000 UTC m=+2190.755184199" lastFinishedPulling="2025-11-25 22:07:18.857417192 +0000 UTC m=+2194.319893544" observedRunningTime="2025-11-25 22:07:19.39088872 +0000 UTC m=+2194.853365082" watchObservedRunningTime="2025-11-25 22:07:19.405509728 +0000 UTC m=+2194.867986090" Nov 25 22:07:21 crc kubenswrapper[4910]: I1125 22:07:21.205800 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:07:21 crc kubenswrapper[4910]: E1125 22:07:21.206866 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:07:23 crc kubenswrapper[4910]: I1125 22:07:23.986460 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:23 crc kubenswrapper[4910]: I1125 22:07:23.986848 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:25 crc kubenswrapper[4910]: I1125 22:07:25.056923 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-whvtb" podUID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerName="registry-server" probeResult="failure" output=< Nov 25 22:07:25 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Nov 25 22:07:25 crc kubenswrapper[4910]: > Nov 25 22:07:34 crc kubenswrapper[4910]: I1125 22:07:34.047635 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:34 crc kubenswrapper[4910]: I1125 22:07:34.117517 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:34 crc kubenswrapper[4910]: I1125 22:07:34.204210 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:07:34 crc kubenswrapper[4910]: E1125 22:07:34.204927 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:07:34 crc kubenswrapper[4910]: I1125 22:07:34.290983 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-operators-whvtb"] Nov 25 22:07:35 crc kubenswrapper[4910]: I1125 22:07:35.549224 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-whvtb" podUID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerName="registry-server" containerID="cri-o://90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217" gracePeriod=2 Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.044995 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.150491 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-catalog-content\") pod \"67e61987-dabb-4e50-b8f2-a14acf19df0d\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.150709 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjq4z\" (UniqueName: \"kubernetes.io/projected/67e61987-dabb-4e50-b8f2-a14acf19df0d-kube-api-access-zjq4z\") pod \"67e61987-dabb-4e50-b8f2-a14acf19df0d\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.150808 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-utilities\") pod \"67e61987-dabb-4e50-b8f2-a14acf19df0d\" (UID: \"67e61987-dabb-4e50-b8f2-a14acf19df0d\") " Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.151713 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-utilities" (OuterVolumeSpecName: "utilities") pod "67e61987-dabb-4e50-b8f2-a14acf19df0d" (UID: "67e61987-dabb-4e50-b8f2-a14acf19df0d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.158911 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67e61987-dabb-4e50-b8f2-a14acf19df0d-kube-api-access-zjq4z" (OuterVolumeSpecName: "kube-api-access-zjq4z") pod "67e61987-dabb-4e50-b8f2-a14acf19df0d" (UID: "67e61987-dabb-4e50-b8f2-a14acf19df0d"). InnerVolumeSpecName "kube-api-access-zjq4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.255374 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjq4z\" (UniqueName: \"kubernetes.io/projected/67e61987-dabb-4e50-b8f2-a14acf19df0d-kube-api-access-zjq4z\") on node \"crc\" DevicePath \"\"" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.255409 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.265974 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "67e61987-dabb-4e50-b8f2-a14acf19df0d" (UID: "67e61987-dabb-4e50-b8f2-a14acf19df0d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.357178 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e61987-dabb-4e50-b8f2-a14acf19df0d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.567484 4910 generic.go:334] "Generic (PLEG): container finished" podID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerID="90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217" exitCode=0 Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.567565 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whvtb" event={"ID":"67e61987-dabb-4e50-b8f2-a14acf19df0d","Type":"ContainerDied","Data":"90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217"} Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.567645 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-whvtb" event={"ID":"67e61987-dabb-4e50-b8f2-a14acf19df0d","Type":"ContainerDied","Data":"00df674e248a75c762a59589eb5a9c45d6b85dc435547cef39ded55f9d458a0e"} Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.567684 4910 scope.go:117] "RemoveContainer" containerID="90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.567703 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-whvtb" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.616642 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-whvtb"] Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.618743 4910 scope.go:117] "RemoveContainer" containerID="9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.638297 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-whvtb"] Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.669999 4910 scope.go:117] "RemoveContainer" containerID="2e277aec7c1a03ae81e85abe1fb4bd5e904fc26bfb8de36fa5b7096dac4e8172" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.709817 4910 scope.go:117] "RemoveContainer" containerID="90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217" Nov 25 22:07:36 crc kubenswrapper[4910]: E1125 22:07:36.710293 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217\": container with ID starting with 90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217 not found: ID does not exist" containerID="90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.710448 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217"} err="failed to get container status \"90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217\": rpc error: code = NotFound desc = could not find container \"90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217\": container with ID starting with 90c7b7206173797ab0f4f48d176c83c476fce11e21d548826807afe78d85e217 not found: ID does not exist" Nov 25 22:07:36 crc 
kubenswrapper[4910]: I1125 22:07:36.710593 4910 scope.go:117] "RemoveContainer" containerID="9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80" Nov 25 22:07:36 crc kubenswrapper[4910]: E1125 22:07:36.711006 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80\": container with ID starting with 9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80 not found: ID does not exist" containerID="9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.711045 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80"} err="failed to get container status \"9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80\": rpc error: code = NotFound desc = could not find container \"9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80\": container with ID starting with 9a0f7ff2d4f3c68c542dd211c7d096347b436bcbcb524e3756e87181f5898d80 not found: ID does not exist" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.711068 4910 scope.go:117] "RemoveContainer" containerID="2e277aec7c1a03ae81e85abe1fb4bd5e904fc26bfb8de36fa5b7096dac4e8172" Nov 25 22:07:36 crc kubenswrapper[4910]: E1125 22:07:36.711385 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e277aec7c1a03ae81e85abe1fb4bd5e904fc26bfb8de36fa5b7096dac4e8172\": container with ID starting with 2e277aec7c1a03ae81e85abe1fb4bd5e904fc26bfb8de36fa5b7096dac4e8172 not found: ID does not exist" containerID="2e277aec7c1a03ae81e85abe1fb4bd5e904fc26bfb8de36fa5b7096dac4e8172" Nov 25 22:07:36 crc kubenswrapper[4910]: I1125 22:07:36.711493 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e277aec7c1a03ae81e85abe1fb4bd5e904fc26bfb8de36fa5b7096dac4e8172"} err="failed to get container status \"2e277aec7c1a03ae81e85abe1fb4bd5e904fc26bfb8de36fa5b7096dac4e8172\": rpc error: code = NotFound desc = could not find container \"2e277aec7c1a03ae81e85abe1fb4bd5e904fc26bfb8de36fa5b7096dac4e8172\": container with ID starting with 2e277aec7c1a03ae81e85abe1fb4bd5e904fc26bfb8de36fa5b7096dac4e8172 not found: ID does not exist" Nov 25 22:07:37 crc kubenswrapper[4910]: I1125 22:07:37.225131 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67e61987-dabb-4e50-b8f2-a14acf19df0d" path="/var/lib/kubelet/pods/67e61987-dabb-4e50-b8f2-a14acf19df0d/volumes" Nov 25 22:07:45 crc kubenswrapper[4910]: I1125 22:07:45.213584 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:07:45 crc kubenswrapper[4910]: E1125 22:07:45.214622 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:07:58 crc kubenswrapper[4910]: I1125 22:07:58.204213 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" 
Nov 25 22:07:58 crc kubenswrapper[4910]: E1125 22:07:58.206047 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:08:09 crc kubenswrapper[4910]: I1125 22:08:09.204601 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255"
Nov 25 22:08:09 crc kubenswrapper[4910]: E1125 22:08:09.205989 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:08:21 crc kubenswrapper[4910]: I1125 22:08:21.207692 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255"
Nov 25 22:08:21 crc kubenswrapper[4910]: E1125 22:08:21.208976 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:08:32 crc kubenswrapper[4910]: I1125 22:08:32.204976 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255"
Nov 25 22:08:32 crc kubenswrapper[4910]: E1125 22:08:32.206624 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:08:46 crc kubenswrapper[4910]: I1125 22:08:46.205236 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255"
Nov 25 22:08:46 crc kubenswrapper[4910]: E1125 22:08:46.206211 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:08:55 crc kubenswrapper[4910]: I1125 22:08:55.637046 4910 generic.go:334] "Generic (PLEG): container finished" podID="1132a133-2fdf-4a87-b132-d1f1c0a26c76" containerID="975368ba24bac4df5a2b889d5769ac4a72b262288d6831ecd046dd3fcbec6a82" exitCode=0
Nov 25 22:08:55 crc kubenswrapper[4910]: I1125 22:08:55.637106 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" event={"ID":"1132a133-2fdf-4a87-b132-d1f1c0a26c76","Type":"ContainerDied","Data":"975368ba24bac4df5a2b889d5769ac4a72b262288d6831ecd046dd3fcbec6a82"}
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.195152 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.326830 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-combined-ca-bundle\") pod \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") "
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.326915 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-inventory\") pod \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") "
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.326957 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mtsnw\" (UniqueName: \"kubernetes.io/projected/1132a133-2fdf-4a87-b132-d1f1c0a26c76-kube-api-access-mtsnw\") pod \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") "
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.327065 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-ssh-key\") pod \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") "
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.327112 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-secret-0\") pod \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\" (UID: \"1132a133-2fdf-4a87-b132-d1f1c0a26c76\") "
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.335867 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1132a133-2fdf-4a87-b132-d1f1c0a26c76-kube-api-access-mtsnw" (OuterVolumeSpecName: "kube-api-access-mtsnw") pod "1132a133-2fdf-4a87-b132-d1f1c0a26c76" (UID: "1132a133-2fdf-4a87-b132-d1f1c0a26c76"). InnerVolumeSpecName "kube-api-access-mtsnw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.342397 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "1132a133-2fdf-4a87-b132-d1f1c0a26c76" (UID: "1132a133-2fdf-4a87-b132-d1f1c0a26c76"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.360919 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1132a133-2fdf-4a87-b132-d1f1c0a26c76" (UID: "1132a133-2fdf-4a87-b132-d1f1c0a26c76"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.364450 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "1132a133-2fdf-4a87-b132-d1f1c0a26c76" (UID: "1132a133-2fdf-4a87-b132-d1f1c0a26c76"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.380486 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-inventory" (OuterVolumeSpecName: "inventory") pod "1132a133-2fdf-4a87-b132-d1f1c0a26c76" (UID: "1132a133-2fdf-4a87-b132-d1f1c0a26c76"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.429801 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.429870 4910 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-secret-0\") on node \"crc\" DevicePath \"\""
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.429888 4910 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.429910 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1132a133-2fdf-4a87-b132-d1f1c0a26c76-inventory\") on node \"crc\" DevicePath \"\""
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.429924 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mtsnw\" (UniqueName: \"kubernetes.io/projected/1132a133-2fdf-4a87-b132-d1f1c0a26c76-kube-api-access-mtsnw\") on node \"crc\" DevicePath \"\""
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.666470 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk" event={"ID":"1132a133-2fdf-4a87-b132-d1f1c0a26c76","Type":"ContainerDied","Data":"b006ee690cc86fa62ba23a97df1fec58610ec977d5fcc824a7c3f138fc7bc1ee"}
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.666861 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b006ee690cc86fa62ba23a97df1fec58610ec977d5fcc824a7c3f138fc7bc1ee"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.666571 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-h78fk"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.802949 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"]
Nov 25 22:08:57 crc kubenswrapper[4910]: E1125 22:08:57.803444 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerName="extract-utilities"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.803466 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerName="extract-utilities"
Nov 25 22:08:57 crc kubenswrapper[4910]: E1125 22:08:57.803507 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerName="extract-content"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.803517 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerName="extract-content"
Nov 25 22:08:57 crc kubenswrapper[4910]: E1125 22:08:57.803539 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerName="registry-server"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.803547 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerName="registry-server"
Nov 25 22:08:57 crc kubenswrapper[4910]: E1125 22:08:57.803566 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1132a133-2fdf-4a87-b132-d1f1c0a26c76" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.803578 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="1132a133-2fdf-4a87-b132-d1f1c0a26c76" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.803815 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="67e61987-dabb-4e50-b8f2-a14acf19df0d" containerName="registry-server"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.803846 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="1132a133-2fdf-4a87-b132-d1f1c0a26c76" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.804640 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.807292 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.815586 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.815905 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.816066 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.816298 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.816474 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.819342 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.830983 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"]
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.945020 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.945134 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.946469 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.946574 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.946800 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.946915 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.946977 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.947031 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:57 crc kubenswrapper[4910]: I1125 22:08:57.947325 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfwp8\" (UniqueName: \"kubernetes.io/projected/0a190739-9d08-41b3-a45d-42d0b636ccad-kube-api-access-zfwp8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.049765 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.050234 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfwp8\" (UniqueName: \"kubernetes.io/projected/0a190739-9d08-41b3-a45d-42d0b636ccad-kube-api-access-zfwp8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.050367 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.050487 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.050719 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.051225 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.051395 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.051538 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.052316 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.052540 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.056370 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.057079 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.058117 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.060775 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.064344 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.064964 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.067314 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.083338 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfwp8\" (UniqueName: \"kubernetes.io/projected/0a190739-9d08-41b3-a45d-42d0b636ccad-kube-api-access-zfwp8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-sv2kk\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.136078 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"
Nov 25 22:08:58 crc kubenswrapper[4910]: I1125 22:08:58.847571 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk"]
Nov 25 22:08:59 crc kubenswrapper[4910]: I1125 22:08:59.697350 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk" event={"ID":"0a190739-9d08-41b3-a45d-42d0b636ccad","Type":"ContainerStarted","Data":"3c1c64065492673b12c0eb9b884fc97dc024ba133a6a6a375b1efacba91ec24f"}
Nov 25 22:08:59 crc kubenswrapper[4910]: I1125 22:08:59.697712 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk" event={"ID":"0a190739-9d08-41b3-a45d-42d0b636ccad","Type":"ContainerStarted","Data":"0761642c47b3e1bc65d81d41e9cfac8b812c1c9104a0ca04fc7b7e3fa89ffce1"}
Nov 25 22:08:59 crc kubenswrapper[4910]: I1125 22:08:59.752535 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk" podStartSLOduration=2.275301416 podStartE2EDuration="2.752511666s" podCreationTimestamp="2025-11-25 22:08:57 +0000 UTC" firstStartedPulling="2025-11-25 22:08:58.844270004 +0000 UTC m=+2294.306746346" lastFinishedPulling="2025-11-25 22:08:59.321480274 +0000 UTC m=+2294.783956596" observedRunningTime="2025-11-25 22:08:59.731796803 +0000 UTC m=+2295.194273165" watchObservedRunningTime="2025-11-25 22:08:59.752511666 +0000 UTC m=+2295.214987988"
Nov 25 22:09:00 crc kubenswrapper[4910]: I1125 22:09:00.206191 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255"
Nov 25 22:09:00 crc kubenswrapper[4910]: E1125 22:09:00.207005 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:09:14 crc kubenswrapper[4910]: I1125 22:09:14.205749 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255"
Nov 25 22:09:14 crc kubenswrapper[4910]: E1125 22:09:14.207263 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:09:26 crc kubenswrapper[4910]: I1125 22:09:26.205020 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255"
Nov 25 22:09:26 crc kubenswrapper[4910]: E1125 22:09:26.205705 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.669365 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vfdqf"]
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.673756 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.694674 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vfdqf"]
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.755393 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hsrcw\" (UniqueName: \"kubernetes.io/projected/25215931-decf-468f-a3a5-b778736d3c32-kube-api-access-hsrcw\") pod \"community-operators-vfdqf\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.755695 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-catalog-content\") pod \"community-operators-vfdqf\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.755942 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-utilities\") pod \"community-operators-vfdqf\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.857937 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-utilities\") pod \"community-operators-vfdqf\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.858066 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hsrcw\" (UniqueName: \"kubernetes.io/projected/25215931-decf-468f-a3a5-b778736d3c32-kube-api-access-hsrcw\") pod \"community-operators-vfdqf\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.858100 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-catalog-content\") pod \"community-operators-vfdqf\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.858842 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-catalog-content\") pod \"community-operators-vfdqf\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.858831 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-utilities\") pod \"community-operators-vfdqf\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:29 crc kubenswrapper[4910]: I1125 22:09:29.883198 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hsrcw\" (UniqueName: \"kubernetes.io/projected/25215931-decf-468f-a3a5-b778736d3c32-kube-api-access-hsrcw\") pod \"community-operators-vfdqf\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:30 crc kubenswrapper[4910]: I1125 22:09:30.068988 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:30 crc kubenswrapper[4910]: I1125 22:09:30.616514 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vfdqf"]
Nov 25 22:09:30 crc kubenswrapper[4910]: W1125 22:09:30.631326 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25215931_decf_468f_a3a5_b778736d3c32.slice/crio-b3bfe469e1b682cfb8980d589dca055eef80dbb13a4b65264d2f47dbca416aa6 WatchSource:0}: Error finding container b3bfe469e1b682cfb8980d589dca055eef80dbb13a4b65264d2f47dbca416aa6: Status 404 returned error can't find the container with id b3bfe469e1b682cfb8980d589dca055eef80dbb13a4b65264d2f47dbca416aa6
Nov 25 22:09:31 crc kubenswrapper[4910]: I1125 22:09:31.107686 4910 generic.go:334] "Generic (PLEG): container finished" podID="25215931-decf-468f-a3a5-b778736d3c32" containerID="24573fbaef519c9653a5fdcba83221770f3475aa1917e24c0cbd1d56f0ca3ec0" exitCode=0
Nov 25 22:09:31 crc kubenswrapper[4910]: I1125 22:09:31.107765 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfdqf" event={"ID":"25215931-decf-468f-a3a5-b778736d3c32","Type":"ContainerDied","Data":"24573fbaef519c9653a5fdcba83221770f3475aa1917e24c0cbd1d56f0ca3ec0"}
Nov 25 22:09:31 crc kubenswrapper[4910]: I1125 22:09:31.107817 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfdqf" event={"ID":"25215931-decf-468f-a3a5-b778736d3c32","Type":"ContainerStarted","Data":"b3bfe469e1b682cfb8980d589dca055eef80dbb13a4b65264d2f47dbca416aa6"}
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.075867 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4x6rr"]
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.081217 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4x6rr"
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.090072 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4x6rr"]
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.136409 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfdqf" event={"ID":"25215931-decf-468f-a3a5-b778736d3c32","Type":"ContainerStarted","Data":"ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8"}
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.221920 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dcbbm\" (UniqueName: \"kubernetes.io/projected/eb6ed988-974b-42d9-bb2d-420dab06c76d-kube-api-access-dcbbm\") pod \"redhat-marketplace-4x6rr\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " pod="openshift-marketplace/redhat-marketplace-4x6rr"
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.222149 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-utilities\") pod \"redhat-marketplace-4x6rr\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " pod="openshift-marketplace/redhat-marketplace-4x6rr"
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.222289 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-catalog-content\") pod \"redhat-marketplace-4x6rr\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " pod="openshift-marketplace/redhat-marketplace-4x6rr"
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.324166 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-utilities\") pod \"redhat-marketplace-4x6rr\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " pod="openshift-marketplace/redhat-marketplace-4x6rr"
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.324299 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-catalog-content\") pod \"redhat-marketplace-4x6rr\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " pod="openshift-marketplace/redhat-marketplace-4x6rr"
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.324845 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dcbbm\" (UniqueName: \"kubernetes.io/projected/eb6ed988-974b-42d9-bb2d-420dab06c76d-kube-api-access-dcbbm\") pod \"redhat-marketplace-4x6rr\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " pod="openshift-marketplace/redhat-marketplace-4x6rr"
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.325417 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-catalog-content\") pod \"redhat-marketplace-4x6rr\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " pod="openshift-marketplace/redhat-marketplace-4x6rr"
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.325916 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-utilities\") pod \"redhat-marketplace-4x6rr\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " pod="openshift-marketplace/redhat-marketplace-4x6rr"
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.345019 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dcbbm\" (UniqueName: \"kubernetes.io/projected/eb6ed988-974b-42d9-bb2d-420dab06c76d-kube-api-access-dcbbm\") pod \"redhat-marketplace-4x6rr\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " pod="openshift-marketplace/redhat-marketplace-4x6rr"
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.451764 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4x6rr"
Nov 25 22:09:32 crc kubenswrapper[4910]: I1125 22:09:32.946512 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4x6rr"]
Nov 25 22:09:33 crc kubenswrapper[4910]: I1125 22:09:33.149869 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4x6rr" event={"ID":"eb6ed988-974b-42d9-bb2d-420dab06c76d","Type":"ContainerStarted","Data":"f9c5f93540f3440a86672a44084a824437f48cd45ca20dc1ee972c407bad35f8"}
Nov 25 22:09:33 crc kubenswrapper[4910]: I1125 22:09:33.152084 4910 generic.go:334] "Generic (PLEG): container finished" podID="25215931-decf-468f-a3a5-b778736d3c32" containerID="ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8" exitCode=0
Nov 25 22:09:33 crc kubenswrapper[4910]: I1125 22:09:33.152140 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfdqf" event={"ID":"25215931-decf-468f-a3a5-b778736d3c32","Type":"ContainerDied","Data":"ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8"}
Nov 25 22:09:34 crc kubenswrapper[4910]: I1125 22:09:34.165395 4910 generic.go:334] "Generic (PLEG): container finished" podID="eb6ed988-974b-42d9-bb2d-420dab06c76d" containerID="d33b9bdb21cd7362865eabf536ef63b690c78577d58b48f2be7d93e89e2e795f" exitCode=0
Nov 25 22:09:34 crc kubenswrapper[4910]: I1125 22:09:34.165485 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4x6rr" event={"ID":"eb6ed988-974b-42d9-bb2d-420dab06c76d","Type":"ContainerDied","Data":"d33b9bdb21cd7362865eabf536ef63b690c78577d58b48f2be7d93e89e2e795f"}
Nov 25 22:09:34 crc kubenswrapper[4910]: I1125 22:09:34.172379 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfdqf" event={"ID":"25215931-decf-468f-a3a5-b778736d3c32","Type":"ContainerStarted","Data":"937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7"}
Nov 25 22:09:34 crc kubenswrapper[4910]: I1125 22:09:34.226487 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vfdqf" podStartSLOduration=2.761514446 podStartE2EDuration="5.226462139s" podCreationTimestamp="2025-11-25 22:09:29 +0000 UTC" firstStartedPulling="2025-11-25 22:09:31.11052184 +0000 UTC m=+2326.572998182" lastFinishedPulling="2025-11-25 22:09:33.575469543 +0000 UTC m=+2329.037945875" observedRunningTime="2025-11-25 22:09:34.215544842 +0000 UTC m=+2329.678021184" watchObservedRunningTime="2025-11-25 22:09:34.226462139 +0000 UTC m=+2329.688938471"
Nov 25 22:09:35 crc kubenswrapper[4910]: I1125 22:09:35.185531 4910 generic.go:334] "Generic (PLEG): container finished" podID="eb6ed988-974b-42d9-bb2d-420dab06c76d" containerID="7c6f9483b7c6f1544b6599061482489815e9f2dff0c795a69616edd9dcfc8a75" exitCode=0
Nov 25 22:09:35 crc kubenswrapper[4910]: I1125 22:09:35.185729 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4x6rr" event={"ID":"eb6ed988-974b-42d9-bb2d-420dab06c76d","Type":"ContainerDied","Data":"7c6f9483b7c6f1544b6599061482489815e9f2dff0c795a69616edd9dcfc8a75"}
Nov 25 22:09:36 crc kubenswrapper[4910]: I1125 22:09:36.204017 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4x6rr" event={"ID":"eb6ed988-974b-42d9-bb2d-420dab06c76d","Type":"ContainerStarted","Data":"8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b"}
Nov 25 22:09:36 crc kubenswrapper[4910]: I1125 22:09:36.239135 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4x6rr" podStartSLOduration=2.765651935 podStartE2EDuration="4.23911341s" podCreationTimestamp="2025-11-25 22:09:32 +0000 UTC" firstStartedPulling="2025-11-25 22:09:34.1709741 +0000 UTC m=+2329.633450432" lastFinishedPulling="2025-11-25 22:09:35.644435585 +0000 UTC m=+2331.106911907" observedRunningTime="2025-11-25 22:09:36.234278008 +0000 UTC m=+2331.696754340" watchObservedRunningTime="2025-11-25 22:09:36.23911341 +0000 UTC m=+2331.701589732"
Nov 25 22:09:38 crc kubenswrapper[4910]: I1125 22:09:38.205742 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255"
Nov 25 22:09:38 crc kubenswrapper[4910]: E1125 22:09:38.207089 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:09:40 crc kubenswrapper[4910]: I1125 22:09:40.070150 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:40 crc kubenswrapper[4910]: I1125 22:09:40.070594 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:40 crc kubenswrapper[4910]: I1125 22:09:40.163825 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:40 crc kubenswrapper[4910]: I1125 22:09:40.320143 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vfdqf"
Nov 25 22:09:40 crc kubenswrapper[4910]: I1125 22:09:40.854067 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vfdqf"]
Nov 25 22:09:42 crc kubenswrapper[4910]: I1125 22:09:42.277442 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vfdqf" podUID="25215931-decf-468f-a3a5-b778736d3c32" containerName="registry-server" containerID="cri-o://937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7" gracePeriod=2
Nov 25 22:09:42 crc kubenswrapper[4910]: I1125 22:09:42.452808 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status=""
pod="openshift-marketplace/redhat-marketplace-4x6rr" Nov 25 22:09:42 crc kubenswrapper[4910]: I1125 22:09:42.452892 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4x6rr" Nov 25 22:09:42 crc kubenswrapper[4910]: I1125 22:09:42.518567 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4x6rr" Nov 25 22:09:42 crc kubenswrapper[4910]: I1125 22:09:42.869801 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vfdqf" Nov 25 22:09:42 crc kubenswrapper[4910]: I1125 22:09:42.939897 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hsrcw\" (UniqueName: \"kubernetes.io/projected/25215931-decf-468f-a3a5-b778736d3c32-kube-api-access-hsrcw\") pod \"25215931-decf-468f-a3a5-b778736d3c32\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " Nov 25 22:09:42 crc kubenswrapper[4910]: I1125 22:09:42.940185 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-catalog-content\") pod \"25215931-decf-468f-a3a5-b778736d3c32\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " Nov 25 22:09:42 crc kubenswrapper[4910]: I1125 22:09:42.940389 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-utilities\") pod \"25215931-decf-468f-a3a5-b778736d3c32\" (UID: \"25215931-decf-468f-a3a5-b778736d3c32\") " Nov 25 22:09:42 crc kubenswrapper[4910]: I1125 22:09:42.942158 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-utilities" (OuterVolumeSpecName: "utilities") pod "25215931-decf-468f-a3a5-b778736d3c32" (UID: "25215931-decf-468f-a3a5-b778736d3c32"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:09:42 crc kubenswrapper[4910]: I1125 22:09:42.947629 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25215931-decf-468f-a3a5-b778736d3c32-kube-api-access-hsrcw" (OuterVolumeSpecName: "kube-api-access-hsrcw") pod "25215931-decf-468f-a3a5-b778736d3c32" (UID: "25215931-decf-468f-a3a5-b778736d3c32"). InnerVolumeSpecName "kube-api-access-hsrcw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.037063 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "25215931-decf-468f-a3a5-b778736d3c32" (UID: "25215931-decf-468f-a3a5-b778736d3c32"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.044218 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.044284 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hsrcw\" (UniqueName: \"kubernetes.io/projected/25215931-decf-468f-a3a5-b778736d3c32-kube-api-access-hsrcw\") on node \"crc\" DevicePath \"\"" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.044304 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/25215931-decf-468f-a3a5-b778736d3c32-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.292949 4910 generic.go:334] "Generic (PLEG): container finished" podID="25215931-decf-468f-a3a5-b778736d3c32" containerID="937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7" exitCode=0 Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.293071 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vfdqf" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.293048 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfdqf" event={"ID":"25215931-decf-468f-a3a5-b778736d3c32","Type":"ContainerDied","Data":"937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7"} Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.293285 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vfdqf" event={"ID":"25215931-decf-468f-a3a5-b778736d3c32","Type":"ContainerDied","Data":"b3bfe469e1b682cfb8980d589dca055eef80dbb13a4b65264d2f47dbca416aa6"} Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.293322 4910 scope.go:117] "RemoveContainer" containerID="937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.326005 4910 scope.go:117] "RemoveContainer" containerID="ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.327498 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vfdqf"] Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.339961 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vfdqf"] Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.351065 4910 scope.go:117] "RemoveContainer" containerID="24573fbaef519c9653a5fdcba83221770f3475aa1917e24c0cbd1d56f0ca3ec0" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.353215 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4x6rr" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.394231 4910 scope.go:117] "RemoveContainer" containerID="937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7" Nov 25 22:09:43 crc kubenswrapper[4910]: E1125 22:09:43.394984 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7\": container with ID starting with 
937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7 not found: ID does not exist" containerID="937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.395030 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7"} err="failed to get container status \"937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7\": rpc error: code = NotFound desc = could not find container \"937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7\": container with ID starting with 937546ac468e59700d6dc6e9b8fe6b49684a80a11dac2f84a4ecd65c8f87bcc7 not found: ID does not exist" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.395061 4910 scope.go:117] "RemoveContainer" containerID="ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8" Nov 25 22:09:43 crc kubenswrapper[4910]: E1125 22:09:43.395850 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8\": container with ID starting with ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8 not found: ID does not exist" containerID="ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.395911 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8"} err="failed to get container status \"ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8\": rpc error: code = NotFound desc = could not find container \"ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8\": container with ID starting with ba508f6c7ba115d2fb8e2ce5c3f34cb464e580faef5094f28a84b5151ccf4bd8 not found: ID does not exist" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.395951 4910 scope.go:117] "RemoveContainer" containerID="24573fbaef519c9653a5fdcba83221770f3475aa1917e24c0cbd1d56f0ca3ec0" Nov 25 22:09:43 crc kubenswrapper[4910]: E1125 22:09:43.396588 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24573fbaef519c9653a5fdcba83221770f3475aa1917e24c0cbd1d56f0ca3ec0\": container with ID starting with 24573fbaef519c9653a5fdcba83221770f3475aa1917e24c0cbd1d56f0ca3ec0 not found: ID does not exist" containerID="24573fbaef519c9653a5fdcba83221770f3475aa1917e24c0cbd1d56f0ca3ec0" Nov 25 22:09:43 crc kubenswrapper[4910]: I1125 22:09:43.396654 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24573fbaef519c9653a5fdcba83221770f3475aa1917e24c0cbd1d56f0ca3ec0"} err="failed to get container status \"24573fbaef519c9653a5fdcba83221770f3475aa1917e24c0cbd1d56f0ca3ec0\": rpc error: code = NotFound desc = could not find container \"24573fbaef519c9653a5fdcba83221770f3475aa1917e24c0cbd1d56f0ca3ec0\": container with ID starting with 24573fbaef519c9653a5fdcba83221770f3475aa1917e24c0cbd1d56f0ca3ec0 not found: ID does not exist" Nov 25 22:09:45 crc kubenswrapper[4910]: I1125 22:09:45.229021 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25215931-decf-468f-a3a5-b778736d3c32" path="/var/lib/kubelet/pods/25215931-decf-468f-a3a5-b778736d3c32/volumes" Nov 25 22:09:45 crc kubenswrapper[4910]: I1125 22:09:45.661686 
4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4x6rr"] Nov 25 22:09:45 crc kubenswrapper[4910]: I1125 22:09:45.662000 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4x6rr" podUID="eb6ed988-974b-42d9-bb2d-420dab06c76d" containerName="registry-server" containerID="cri-o://8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b" gracePeriod=2 Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.136366 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4x6rr" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.231940 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-utilities\") pod \"eb6ed988-974b-42d9-bb2d-420dab06c76d\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.232061 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-catalog-content\") pod \"eb6ed988-974b-42d9-bb2d-420dab06c76d\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.232126 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dcbbm\" (UniqueName: \"kubernetes.io/projected/eb6ed988-974b-42d9-bb2d-420dab06c76d-kube-api-access-dcbbm\") pod \"eb6ed988-974b-42d9-bb2d-420dab06c76d\" (UID: \"eb6ed988-974b-42d9-bb2d-420dab06c76d\") " Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.232990 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-utilities" (OuterVolumeSpecName: "utilities") pod "eb6ed988-974b-42d9-bb2d-420dab06c76d" (UID: "eb6ed988-974b-42d9-bb2d-420dab06c76d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.240519 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb6ed988-974b-42d9-bb2d-420dab06c76d-kube-api-access-dcbbm" (OuterVolumeSpecName: "kube-api-access-dcbbm") pod "eb6ed988-974b-42d9-bb2d-420dab06c76d" (UID: "eb6ed988-974b-42d9-bb2d-420dab06c76d"). InnerVolumeSpecName "kube-api-access-dcbbm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.251039 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eb6ed988-974b-42d9-bb2d-420dab06c76d" (UID: "eb6ed988-974b-42d9-bb2d-420dab06c76d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.334913 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.334959 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eb6ed988-974b-42d9-bb2d-420dab06c76d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.334973 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dcbbm\" (UniqueName: \"kubernetes.io/projected/eb6ed988-974b-42d9-bb2d-420dab06c76d-kube-api-access-dcbbm\") on node \"crc\" DevicePath \"\"" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.341888 4910 generic.go:334] "Generic (PLEG): container finished" podID="eb6ed988-974b-42d9-bb2d-420dab06c76d" containerID="8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b" exitCode=0 Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.341937 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4x6rr" event={"ID":"eb6ed988-974b-42d9-bb2d-420dab06c76d","Type":"ContainerDied","Data":"8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b"} Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.341971 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4x6rr" event={"ID":"eb6ed988-974b-42d9-bb2d-420dab06c76d","Type":"ContainerDied","Data":"f9c5f93540f3440a86672a44084a824437f48cd45ca20dc1ee972c407bad35f8"} Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.341993 4910 scope.go:117] "RemoveContainer" containerID="8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.342044 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4x6rr" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.379272 4910 scope.go:117] "RemoveContainer" containerID="7c6f9483b7c6f1544b6599061482489815e9f2dff0c795a69616edd9dcfc8a75" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.406022 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4x6rr"] Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.420862 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4x6rr"] Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.425787 4910 scope.go:117] "RemoveContainer" containerID="d33b9bdb21cd7362865eabf536ef63b690c78577d58b48f2be7d93e89e2e795f" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.477162 4910 scope.go:117] "RemoveContainer" containerID="8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b" Nov 25 22:09:46 crc kubenswrapper[4910]: E1125 22:09:46.477689 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b\": container with ID starting with 8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b not found: ID does not exist" containerID="8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.477807 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b"} err="failed to get container status \"8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b\": rpc error: code = NotFound desc = could not find container \"8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b\": container with ID starting with 8be4c8c6dfa328da8332fb903cc2e777381447a0c055e9ee23c32d2c72ff8d7b not found: ID does not exist" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.477934 4910 scope.go:117] "RemoveContainer" containerID="7c6f9483b7c6f1544b6599061482489815e9f2dff0c795a69616edd9dcfc8a75" Nov 25 22:09:46 crc kubenswrapper[4910]: E1125 22:09:46.478404 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7c6f9483b7c6f1544b6599061482489815e9f2dff0c795a69616edd9dcfc8a75\": container with ID starting with 7c6f9483b7c6f1544b6599061482489815e9f2dff0c795a69616edd9dcfc8a75 not found: ID does not exist" containerID="7c6f9483b7c6f1544b6599061482489815e9f2dff0c795a69616edd9dcfc8a75" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.478453 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7c6f9483b7c6f1544b6599061482489815e9f2dff0c795a69616edd9dcfc8a75"} err="failed to get container status \"7c6f9483b7c6f1544b6599061482489815e9f2dff0c795a69616edd9dcfc8a75\": rpc error: code = NotFound desc = could not find container \"7c6f9483b7c6f1544b6599061482489815e9f2dff0c795a69616edd9dcfc8a75\": container with ID starting with 7c6f9483b7c6f1544b6599061482489815e9f2dff0c795a69616edd9dcfc8a75 not found: ID does not exist" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.478481 4910 scope.go:117] "RemoveContainer" containerID="d33b9bdb21cd7362865eabf536ef63b690c78577d58b48f2be7d93e89e2e795f" Nov 25 22:09:46 crc kubenswrapper[4910]: E1125 22:09:46.478892 4910 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d33b9bdb21cd7362865eabf536ef63b690c78577d58b48f2be7d93e89e2e795f\": container with ID starting with d33b9bdb21cd7362865eabf536ef63b690c78577d58b48f2be7d93e89e2e795f not found: ID does not exist" containerID="d33b9bdb21cd7362865eabf536ef63b690c78577d58b48f2be7d93e89e2e795f" Nov 25 22:09:46 crc kubenswrapper[4910]: I1125 22:09:46.478950 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d33b9bdb21cd7362865eabf536ef63b690c78577d58b48f2be7d93e89e2e795f"} err="failed to get container status \"d33b9bdb21cd7362865eabf536ef63b690c78577d58b48f2be7d93e89e2e795f\": rpc error: code = NotFound desc = could not find container \"d33b9bdb21cd7362865eabf536ef63b690c78577d58b48f2be7d93e89e2e795f\": container with ID starting with d33b9bdb21cd7362865eabf536ef63b690c78577d58b48f2be7d93e89e2e795f not found: ID does not exist" Nov 25 22:09:47 crc kubenswrapper[4910]: I1125 22:09:47.218526 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb6ed988-974b-42d9-bb2d-420dab06c76d" path="/var/lib/kubelet/pods/eb6ed988-974b-42d9-bb2d-420dab06c76d/volumes" Nov 25 22:09:51 crc kubenswrapper[4910]: I1125 22:09:51.205160 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:09:51 crc kubenswrapper[4910]: E1125 22:09:51.206408 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:10:05 crc kubenswrapper[4910]: I1125 22:10:05.224211 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:10:05 crc kubenswrapper[4910]: E1125 22:10:05.231686 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:10:19 crc kubenswrapper[4910]: I1125 22:10:19.205722 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:10:19 crc kubenswrapper[4910]: E1125 22:10:19.206804 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:10:34 crc kubenswrapper[4910]: I1125 22:10:34.203779 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:10:34 crc kubenswrapper[4910]: E1125 22:10:34.205694 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:10:48 crc kubenswrapper[4910]: I1125 22:10:48.206545 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:10:48 crc kubenswrapper[4910]: E1125 22:10:48.212113 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:11:02 crc kubenswrapper[4910]: I1125 22:11:02.236554 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:11:02 crc kubenswrapper[4910]: E1125 22:11:02.238099 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:11:14 crc kubenswrapper[4910]: I1125 22:11:14.204774 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:11:14 crc kubenswrapper[4910]: E1125 22:11:14.205624 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:11:28 crc kubenswrapper[4910]: I1125 22:11:28.206523 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:11:28 crc kubenswrapper[4910]: E1125 22:11:28.208467 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:11:40 crc kubenswrapper[4910]: I1125 22:11:40.205036 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:11:40 crc kubenswrapper[4910]: E1125 22:11:40.206282 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:11:52 crc kubenswrapper[4910]: I1125 22:11:52.204659 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:11:52 crc kubenswrapper[4910]: E1125 22:11:52.205798 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:12:06 crc kubenswrapper[4910]: I1125 22:12:06.463987 4910 generic.go:334] "Generic (PLEG): container finished" podID="0a190739-9d08-41b3-a45d-42d0b636ccad" containerID="3c1c64065492673b12c0eb9b884fc97dc024ba133a6a6a375b1efacba91ec24f" exitCode=0 Nov 25 22:12:06 crc kubenswrapper[4910]: I1125 22:12:06.464100 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk" event={"ID":"0a190739-9d08-41b3-a45d-42d0b636ccad","Type":"ContainerDied","Data":"3c1c64065492673b12c0eb9b884fc97dc024ba133a6a6a375b1efacba91ec24f"} Nov 25 22:12:07 crc kubenswrapper[4910]: I1125 22:12:07.204746 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:12:07 crc kubenswrapper[4910]: I1125 22:12:07.484202 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"104e282474eb551cd25807cad718a0cf143a497ab64e055693acd02b62657ead"} Nov 25 22:12:07 crc kubenswrapper[4910]: I1125 22:12:07.993091 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk" Nov 25 22:12:07 crc kubenswrapper[4910]: I1125 22:12:07.998928 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-inventory\") pod \"0a190739-9d08-41b3-a45d-42d0b636ccad\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " Nov 25 22:12:07 crc kubenswrapper[4910]: I1125 22:12:07.998991 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-0\") pod \"0a190739-9d08-41b3-a45d-42d0b636ccad\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " Nov 25 22:12:07 crc kubenswrapper[4910]: I1125 22:12:07.999024 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-extra-config-0\") pod \"0a190739-9d08-41b3-a45d-42d0b636ccad\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " Nov 25 22:12:07 crc kubenswrapper[4910]: I1125 22:12:07.999153 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-0\") pod \"0a190739-9d08-41b3-a45d-42d0b636ccad\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.044907 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "0a190739-9d08-41b3-a45d-42d0b636ccad" (UID: "0a190739-9d08-41b3-a45d-42d0b636ccad"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.052162 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-inventory" (OuterVolumeSpecName: "inventory") pod "0a190739-9d08-41b3-a45d-42d0b636ccad" (UID: "0a190739-9d08-41b3-a45d-42d0b636ccad"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.057693 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "0a190739-9d08-41b3-a45d-42d0b636ccad" (UID: "0a190739-9d08-41b3-a45d-42d0b636ccad"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.063104 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "0a190739-9d08-41b3-a45d-42d0b636ccad" (UID: "0a190739-9d08-41b3-a45d-42d0b636ccad"). InnerVolumeSpecName "nova-migration-ssh-key-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.101343 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-1\") pod \"0a190739-9d08-41b3-a45d-42d0b636ccad\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.101413 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-combined-ca-bundle\") pod \"0a190739-9d08-41b3-a45d-42d0b636ccad\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.101621 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfwp8\" (UniqueName: \"kubernetes.io/projected/0a190739-9d08-41b3-a45d-42d0b636ccad-kube-api-access-zfwp8\") pod \"0a190739-9d08-41b3-a45d-42d0b636ccad\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.102805 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-ssh-key\") pod \"0a190739-9d08-41b3-a45d-42d0b636ccad\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.103181 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-1\") pod \"0a190739-9d08-41b3-a45d-42d0b636ccad\" (UID: \"0a190739-9d08-41b3-a45d-42d0b636ccad\") " Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.103748 4910 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.103767 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.103777 4910 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.103788 4910 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.106233 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a190739-9d08-41b3-a45d-42d0b636ccad-kube-api-access-zfwp8" (OuterVolumeSpecName: "kube-api-access-zfwp8") pod "0a190739-9d08-41b3-a45d-42d0b636ccad" (UID: "0a190739-9d08-41b3-a45d-42d0b636ccad"). InnerVolumeSpecName "kube-api-access-zfwp8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.107821 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "0a190739-9d08-41b3-a45d-42d0b636ccad" (UID: "0a190739-9d08-41b3-a45d-42d0b636ccad"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.138018 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "0a190739-9d08-41b3-a45d-42d0b636ccad" (UID: "0a190739-9d08-41b3-a45d-42d0b636ccad"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.144828 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0a190739-9d08-41b3-a45d-42d0b636ccad" (UID: "0a190739-9d08-41b3-a45d-42d0b636ccad"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.152786 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "0a190739-9d08-41b3-a45d-42d0b636ccad" (UID: "0a190739-9d08-41b3-a45d-42d0b636ccad"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.206564 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfwp8\" (UniqueName: \"kubernetes.io/projected/0a190739-9d08-41b3-a45d-42d0b636ccad-kube-api-access-zfwp8\") on node \"crc\" DevicePath \"\"" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.206610 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.206624 4910 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.206637 4910 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.206648 4910 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a190739-9d08-41b3-a45d-42d0b636ccad-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.500327 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk" event={"ID":"0a190739-9d08-41b3-a45d-42d0b636ccad","Type":"ContainerDied","Data":"0761642c47b3e1bc65d81d41e9cfac8b812c1c9104a0ca04fc7b7e3fa89ffce1"} Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.500634 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0761642c47b3e1bc65d81d41e9cfac8b812c1c9104a0ca04fc7b7e3fa89ffce1" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.500436 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-sv2kk" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.617876 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw"] Nov 25 22:12:08 crc kubenswrapper[4910]: E1125 22:12:08.618413 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25215931-decf-468f-a3a5-b778736d3c32" containerName="registry-server" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.618433 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="25215931-decf-468f-a3a5-b778736d3c32" containerName="registry-server" Nov 25 22:12:08 crc kubenswrapper[4910]: E1125 22:12:08.618457 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25215931-decf-468f-a3a5-b778736d3c32" containerName="extract-utilities" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.618468 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="25215931-decf-468f-a3a5-b778736d3c32" containerName="extract-utilities" Nov 25 22:12:08 crc kubenswrapper[4910]: E1125 22:12:08.618478 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb6ed988-974b-42d9-bb2d-420dab06c76d" containerName="extract-content" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.618485 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb6ed988-974b-42d9-bb2d-420dab06c76d" containerName="extract-content" Nov 25 22:12:08 crc kubenswrapper[4910]: E1125 22:12:08.618498 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb6ed988-974b-42d9-bb2d-420dab06c76d" containerName="registry-server" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.618504 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb6ed988-974b-42d9-bb2d-420dab06c76d" containerName="registry-server" Nov 25 22:12:08 crc kubenswrapper[4910]: E1125 22:12:08.618531 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a190739-9d08-41b3-a45d-42d0b636ccad" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.618540 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a190739-9d08-41b3-a45d-42d0b636ccad" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 22:12:08 crc kubenswrapper[4910]: E1125 22:12:08.618553 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25215931-decf-468f-a3a5-b778736d3c32" containerName="extract-content" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.618560 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="25215931-decf-468f-a3a5-b778736d3c32" containerName="extract-content" Nov 25 22:12:08 crc kubenswrapper[4910]: E1125 22:12:08.618580 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb6ed988-974b-42d9-bb2d-420dab06c76d" containerName="extract-utilities" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.618587 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb6ed988-974b-42d9-bb2d-420dab06c76d" containerName="extract-utilities" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.618778 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb6ed988-974b-42d9-bb2d-420dab06c76d" containerName="registry-server" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.618794 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a190739-9d08-41b3-a45d-42d0b636ccad" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 25 
22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.618815 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="25215931-decf-468f-a3a5-b778736d3c32" containerName="registry-server" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.634000 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.637512 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-5czwr" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.639515 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.639747 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.640402 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.641641 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.654346 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw"] Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.723196 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b9skm\" (UniqueName: \"kubernetes.io/projected/66309eee-ce32-4108-82f9-e96dbc03dc45-kube-api-access-b9skm\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.723274 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.723330 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.723648 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.724093 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.724147 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.724387 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.826868 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.827029 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.827105 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b9skm\" (UniqueName: \"kubernetes.io/projected/66309eee-ce32-4108-82f9-e96dbc03dc45-kube-api-access-b9skm\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.827137 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.827209 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.827302 4910 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.827447 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.837721 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.837932 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.838718 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.838714 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.841985 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc kubenswrapper[4910]: I1125 22:12:08.842424 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:08 crc 
kubenswrapper[4910]: I1125 22:12:08.860275 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b9skm\" (UniqueName: \"kubernetes.io/projected/66309eee-ce32-4108-82f9-e96dbc03dc45-kube-api-access-b9skm\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:09 crc kubenswrapper[4910]: I1125 22:12:09.003028 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:12:09 crc kubenswrapper[4910]: W1125 22:12:09.660750 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod66309eee_ce32_4108_82f9_e96dbc03dc45.slice/crio-ba4107404e24b44c14deffe80525efedc8ccd41addb36b30f043d5bec477e370 WatchSource:0}: Error finding container ba4107404e24b44c14deffe80525efedc8ccd41addb36b30f043d5bec477e370: Status 404 returned error can't find the container with id ba4107404e24b44c14deffe80525efedc8ccd41addb36b30f043d5bec477e370 Nov 25 22:12:09 crc kubenswrapper[4910]: I1125 22:12:09.669077 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw"] Nov 25 22:12:10 crc kubenswrapper[4910]: I1125 22:12:10.527475 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" event={"ID":"66309eee-ce32-4108-82f9-e96dbc03dc45","Type":"ContainerStarted","Data":"ba4107404e24b44c14deffe80525efedc8ccd41addb36b30f043d5bec477e370"} Nov 25 22:12:11 crc kubenswrapper[4910]: I1125 22:12:11.538381 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" event={"ID":"66309eee-ce32-4108-82f9-e96dbc03dc45","Type":"ContainerStarted","Data":"f9e5f63267570fdfa89e6e7bf8214722970b5b876b90f946dc81874db6b93b1c"} Nov 25 22:12:11 crc kubenswrapper[4910]: I1125 22:12:11.577177 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" podStartSLOduration=2.912927949 podStartE2EDuration="3.577153951s" podCreationTimestamp="2025-11-25 22:12:08 +0000 UTC" firstStartedPulling="2025-11-25 22:12:09.664068173 +0000 UTC m=+2485.126544505" lastFinishedPulling="2025-11-25 22:12:10.328294155 +0000 UTC m=+2485.790770507" observedRunningTime="2025-11-25 22:12:11.573011369 +0000 UTC m=+2487.035487691" watchObservedRunningTime="2025-11-25 22:12:11.577153951 +0000 UTC m=+2487.039630283" Nov 25 22:14:23 crc kubenswrapper[4910]: I1125 22:14:23.098926 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:14:23 crc kubenswrapper[4910]: I1125 22:14:23.099713 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:14:39 crc kubenswrapper[4910]: I1125 22:14:39.336146 4910 generic.go:334] "Generic (PLEG): container finished" 
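[annotation] The pod_startup_latency_tracker entry above separates image-pull time from the rest of pod startup: podStartSLOduration is podStartE2EDuration minus the pull window, with the pull window taken from the monotonic clock readings (the m=+... offsets). A minimal sketch, using only numbers copied from that entry, reproduces the reported value:

    package main

    import "fmt"

    func main() {
        // Values copied from the pod_startup_latency_tracker entry above.
        e2e := 3.577153951          // podStartE2EDuration, seconds
        firstPull := 2485.126544505 // firstStartedPulling, monotonic m=+ offset
        lastPull := 2485.790770507  // lastFinishedPulling, monotonic m=+ offset

        slo := e2e - (lastPull - firstPull)
        fmt.Printf("podStartSLOduration ~ %.9f s\n", slo) // ~ 2.912927949, matching the log
    }

Note that using the wall-clock pull timestamps instead of the monotonic offsets gives a value off by a few tens of nanoseconds; only the monotonic window reproduces 2.912927949 exactly.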
podID="66309eee-ce32-4108-82f9-e96dbc03dc45" containerID="f9e5f63267570fdfa89e6e7bf8214722970b5b876b90f946dc81874db6b93b1c" exitCode=0 Nov 25 22:14:39 crc kubenswrapper[4910]: I1125 22:14:39.336340 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" event={"ID":"66309eee-ce32-4108-82f9-e96dbc03dc45","Type":"ContainerDied","Data":"f9e5f63267570fdfa89e6e7bf8214722970b5b876b90f946dc81874db6b93b1c"} Nov 25 22:14:40 crc kubenswrapper[4910]: I1125 22:14:40.823905 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.011134 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-telemetry-combined-ca-bundle\") pod \"66309eee-ce32-4108-82f9-e96dbc03dc45\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.011189 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-2\") pod \"66309eee-ce32-4108-82f9-e96dbc03dc45\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.011524 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-0\") pod \"66309eee-ce32-4108-82f9-e96dbc03dc45\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.011585 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b9skm\" (UniqueName: \"kubernetes.io/projected/66309eee-ce32-4108-82f9-e96dbc03dc45-kube-api-access-b9skm\") pod \"66309eee-ce32-4108-82f9-e96dbc03dc45\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.011611 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-1\") pod \"66309eee-ce32-4108-82f9-e96dbc03dc45\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.011654 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ssh-key\") pod \"66309eee-ce32-4108-82f9-e96dbc03dc45\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.011719 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-inventory\") pod \"66309eee-ce32-4108-82f9-e96dbc03dc45\" (UID: \"66309eee-ce32-4108-82f9-e96dbc03dc45\") " Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.043855 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") 
pod "66309eee-ce32-4108-82f9-e96dbc03dc45" (UID: "66309eee-ce32-4108-82f9-e96dbc03dc45"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.074715 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66309eee-ce32-4108-82f9-e96dbc03dc45-kube-api-access-b9skm" (OuterVolumeSpecName: "kube-api-access-b9skm") pod "66309eee-ce32-4108-82f9-e96dbc03dc45" (UID: "66309eee-ce32-4108-82f9-e96dbc03dc45"). InnerVolumeSpecName "kube-api-access-b9skm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.114995 4910 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.115044 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b9skm\" (UniqueName: \"kubernetes.io/projected/66309eee-ce32-4108-82f9-e96dbc03dc45-kube-api-access-b9skm\") on node \"crc\" DevicePath \"\"" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.130469 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "66309eee-ce32-4108-82f9-e96dbc03dc45" (UID: "66309eee-ce32-4108-82f9-e96dbc03dc45"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.132936 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-inventory" (OuterVolumeSpecName: "inventory") pod "66309eee-ce32-4108-82f9-e96dbc03dc45" (UID: "66309eee-ce32-4108-82f9-e96dbc03dc45"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.142678 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "66309eee-ce32-4108-82f9-e96dbc03dc45" (UID: "66309eee-ce32-4108-82f9-e96dbc03dc45"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.145382 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "66309eee-ce32-4108-82f9-e96dbc03dc45" (UID: "66309eee-ce32-4108-82f9-e96dbc03dc45"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.146380 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "66309eee-ce32-4108-82f9-e96dbc03dc45" (UID: "66309eee-ce32-4108-82f9-e96dbc03dc45"). InnerVolumeSpecName "ceilometer-compute-config-data-2". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.217436 4910 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.217491 4910 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.217507 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.217523 4910 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-inventory\") on node \"crc\" DevicePath \"\"" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.217539 4910 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/66309eee-ce32-4108-82f9-e96dbc03dc45-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.362616 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" event={"ID":"66309eee-ce32-4108-82f9-e96dbc03dc45","Type":"ContainerDied","Data":"ba4107404e24b44c14deffe80525efedc8ccd41addb36b30f043d5bec477e370"} Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.362679 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba4107404e24b44c14deffe80525efedc8ccd41addb36b30f043d5bec477e370" Nov 25 22:14:41 crc kubenswrapper[4910]: I1125 22:14:41.362728 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw" Nov 25 22:14:53 crc kubenswrapper[4910]: I1125 22:14:53.099145 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:14:53 crc kubenswrapper[4910]: I1125 22:14:53.100701 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.173962 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf"] Nov 25 22:15:00 crc kubenswrapper[4910]: E1125 22:15:00.175100 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66309eee-ce32-4108-82f9-e96dbc03dc45" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.175120 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="66309eee-ce32-4108-82f9-e96dbc03dc45" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.175394 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="66309eee-ce32-4108-82f9-e96dbc03dc45" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.176202 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.179563 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.179996 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.195578 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf"] Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.358186 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hc8n8\" (UniqueName: \"kubernetes.io/projected/a6664d06-d361-43d5-b52a-85f2d1d330d7-kube-api-access-hc8n8\") pod \"collect-profiles-29401815-nfjzf\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.358833 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a6664d06-d361-43d5-b52a-85f2d1d330d7-config-volume\") pod \"collect-profiles-29401815-nfjzf\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.358987 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a6664d06-d361-43d5-b52a-85f2d1d330d7-secret-volume\") pod \"collect-profiles-29401815-nfjzf\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.460823 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a6664d06-d361-43d5-b52a-85f2d1d330d7-config-volume\") pod \"collect-profiles-29401815-nfjzf\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.460911 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a6664d06-d361-43d5-b52a-85f2d1d330d7-secret-volume\") pod \"collect-profiles-29401815-nfjzf\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.460974 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hc8n8\" (UniqueName: \"kubernetes.io/projected/a6664d06-d361-43d5-b52a-85f2d1d330d7-kube-api-access-hc8n8\") pod \"collect-profiles-29401815-nfjzf\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.462776 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a6664d06-d361-43d5-b52a-85f2d1d330d7-config-volume\") pod 
\"collect-profiles-29401815-nfjzf\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.472613 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a6664d06-d361-43d5-b52a-85f2d1d330d7-secret-volume\") pod \"collect-profiles-29401815-nfjzf\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.502129 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hc8n8\" (UniqueName: \"kubernetes.io/projected/a6664d06-d361-43d5-b52a-85f2d1d330d7-kube-api-access-hc8n8\") pod \"collect-profiles-29401815-nfjzf\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:00 crc kubenswrapper[4910]: I1125 22:15:00.510910 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:01 crc kubenswrapper[4910]: I1125 22:15:01.018780 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf"] Nov 25 22:15:01 crc kubenswrapper[4910]: W1125 22:15:01.024499 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6664d06_d361_43d5_b52a_85f2d1d330d7.slice/crio-cff40724beffafecd27de3320748d3534712769d0d50651a3cc5ec343ee9d038 WatchSource:0}: Error finding container cff40724beffafecd27de3320748d3534712769d0d50651a3cc5ec343ee9d038: Status 404 returned error can't find the container with id cff40724beffafecd27de3320748d3534712769d0d50651a3cc5ec343ee9d038 Nov 25 22:15:01 crc kubenswrapper[4910]: I1125 22:15:01.623969 4910 generic.go:334] "Generic (PLEG): container finished" podID="a6664d06-d361-43d5-b52a-85f2d1d330d7" containerID="1e6120fbd296f96eef5f078a36693b2dc5825e19753f68928bc681a6a7da0c64" exitCode=0 Nov 25 22:15:01 crc kubenswrapper[4910]: I1125 22:15:01.624134 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" event={"ID":"a6664d06-d361-43d5-b52a-85f2d1d330d7","Type":"ContainerDied","Data":"1e6120fbd296f96eef5f078a36693b2dc5825e19753f68928bc681a6a7da0c64"} Nov 25 22:15:01 crc kubenswrapper[4910]: I1125 22:15:01.624725 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" event={"ID":"a6664d06-d361-43d5-b52a-85f2d1d330d7","Type":"ContainerStarted","Data":"cff40724beffafecd27de3320748d3534712769d0d50651a3cc5ec343ee9d038"} Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.027279 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.229774 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hc8n8\" (UniqueName: \"kubernetes.io/projected/a6664d06-d361-43d5-b52a-85f2d1d330d7-kube-api-access-hc8n8\") pod \"a6664d06-d361-43d5-b52a-85f2d1d330d7\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.230064 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a6664d06-d361-43d5-b52a-85f2d1d330d7-config-volume\") pod \"a6664d06-d361-43d5-b52a-85f2d1d330d7\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.230157 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a6664d06-d361-43d5-b52a-85f2d1d330d7-secret-volume\") pod \"a6664d06-d361-43d5-b52a-85f2d1d330d7\" (UID: \"a6664d06-d361-43d5-b52a-85f2d1d330d7\") " Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.231046 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a6664d06-d361-43d5-b52a-85f2d1d330d7-config-volume" (OuterVolumeSpecName: "config-volume") pod "a6664d06-d361-43d5-b52a-85f2d1d330d7" (UID: "a6664d06-d361-43d5-b52a-85f2d1d330d7"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.233115 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/a6664d06-d361-43d5-b52a-85f2d1d330d7-config-volume\") on node \"crc\" DevicePath \"\"" Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.238340 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a6664d06-d361-43d5-b52a-85f2d1d330d7-kube-api-access-hc8n8" (OuterVolumeSpecName: "kube-api-access-hc8n8") pod "a6664d06-d361-43d5-b52a-85f2d1d330d7" (UID: "a6664d06-d361-43d5-b52a-85f2d1d330d7"). InnerVolumeSpecName "kube-api-access-hc8n8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.248283 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a6664d06-d361-43d5-b52a-85f2d1d330d7-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "a6664d06-d361-43d5-b52a-85f2d1d330d7" (UID: "a6664d06-d361-43d5-b52a-85f2d1d330d7"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.336059 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hc8n8\" (UniqueName: \"kubernetes.io/projected/a6664d06-d361-43d5-b52a-85f2d1d330d7-kube-api-access-hc8n8\") on node \"crc\" DevicePath \"\"" Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.336104 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/a6664d06-d361-43d5-b52a-85f2d1d330d7-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.670423 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" event={"ID":"a6664d06-d361-43d5-b52a-85f2d1d330d7","Type":"ContainerDied","Data":"cff40724beffafecd27de3320748d3534712769d0d50651a3cc5ec343ee9d038"} Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.670929 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cff40724beffafecd27de3320748d3534712769d0d50651a3cc5ec343ee9d038" Nov 25 22:15:03 crc kubenswrapper[4910]: I1125 22:15:03.670488 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401815-nfjzf" Nov 25 22:15:04 crc kubenswrapper[4910]: I1125 22:15:04.130987 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx"] Nov 25 22:15:04 crc kubenswrapper[4910]: I1125 22:15:04.141224 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401770-kk6cx"] Nov 25 22:15:05 crc kubenswrapper[4910]: I1125 22:15:05.224838 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fba1f4e8-1272-428d-95eb-7e01208f7b97" path="/var/lib/kubelet/pods/fba1f4e8-1272-428d-95eb-7e01208f7b97/volumes" Nov 25 22:15:23 crc kubenswrapper[4910]: I1125 22:15:23.099458 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:15:23 crc kubenswrapper[4910]: I1125 22:15:23.100134 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:15:23 crc kubenswrapper[4910]: I1125 22:15:23.100211 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 22:15:23 crc kubenswrapper[4910]: I1125 22:15:23.101212 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"104e282474eb551cd25807cad718a0cf143a497ab64e055693acd02b62657ead"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 22:15:23 crc kubenswrapper[4910]: I1125 22:15:23.101300 4910 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://104e282474eb551cd25807cad718a0cf143a497ab64e055693acd02b62657ead" gracePeriod=600 Nov 25 22:15:23 crc kubenswrapper[4910]: I1125 22:15:23.943104 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="104e282474eb551cd25807cad718a0cf143a497ab64e055693acd02b62657ead" exitCode=0 Nov 25 22:15:23 crc kubenswrapper[4910]: I1125 22:15:23.943204 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"104e282474eb551cd25807cad718a0cf143a497ab64e055693acd02b62657ead"} Nov 25 22:15:23 crc kubenswrapper[4910]: I1125 22:15:23.944003 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075"} Nov 25 22:15:23 crc kubenswrapper[4910]: I1125 22:15:23.944053 4910 scope.go:117] "RemoveContainer" containerID="a0a96b8990ad07f6d22159678f56f75829756d957cfa2eb5b0ac3c1c3b3f4255" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.763822 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 22:15:41 crc kubenswrapper[4910]: E1125 22:15:41.765518 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a6664d06-d361-43d5-b52a-85f2d1d330d7" containerName="collect-profiles" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.765543 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a6664d06-d361-43d5-b52a-85f2d1d330d7" containerName="collect-profiles" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.765932 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a6664d06-d361-43d5-b52a-85f2d1d330d7" containerName="collect-profiles" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.767712 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.771621 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.771646 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.771682 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-d4cgq" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.771734 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.779769 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.928076 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.928139 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.928214 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.928282 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.928945 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.929234 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.929399 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: 
\"kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.929599 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-config-data\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:41 crc kubenswrapper[4910]: I1125 22:15:41.929680 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5zflq\" (UniqueName: \"kubernetes.io/projected/c1d3df8e-e3e1-4065-8736-979a4abaec2c-kube-api-access-5zflq\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.031216 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.031316 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.031348 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.031380 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-config-data\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.031543 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5zflq\" (UniqueName: \"kubernetes.io/projected/c1d3df8e-e3e1-4065-8736-979a4abaec2c-kube-api-access-5zflq\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.031572 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.032157 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: 
\"kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.032209 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.032711 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.033012 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-config-data\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.031593 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.036636 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.036782 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.037816 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.043113 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.043346 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ca-certs\") pod \"tempest-tests-tempest\" (UID: 
\"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.049456 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.059695 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5zflq\" (UniqueName: \"kubernetes.io/projected/c1d3df8e-e3e1-4065-8736-979a4abaec2c-kube-api-access-5zflq\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.079146 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"tempest-tests-tempest\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.104790 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.436339 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 22:15:42 crc kubenswrapper[4910]: I1125 22:15:42.443798 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"] Nov 25 22:15:43 crc kubenswrapper[4910]: I1125 22:15:43.225420 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"c1d3df8e-e3e1-4065-8736-979a4abaec2c","Type":"ContainerStarted","Data":"a1caf090cf6ef76baf3ea934750466c57363fb286e96ca3663e8c7865932a214"} Nov 25 22:15:55 crc kubenswrapper[4910]: I1125 22:15:55.906816 4910 scope.go:117] "RemoveContainer" containerID="326bc5580b1be5088033ece2c528c2ec47df33c236b165c974770c30749b051c" Nov 25 22:16:12 crc kubenswrapper[4910]: E1125 22:16:12.180685 4910 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified" Nov 25 22:16:12 crc kubenswrapper[4910]: E1125 22:16:12.181812 4910 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-5zflq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(c1d3df8e-e3e1-4065-8736-979a4abaec2c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 25 22:16:12 crc kubenswrapper[4910]: E1125 22:16:12.183369 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" 
podUID="c1d3df8e-e3e1-4065-8736-979a4abaec2c" Nov 25 22:16:12 crc kubenswrapper[4910]: E1125 22:16:12.616370 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="c1d3df8e-e3e1-4065-8736-979a4abaec2c" Nov 25 22:16:24 crc kubenswrapper[4910]: I1125 22:16:24.673902 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 25 22:16:26 crc kubenswrapper[4910]: I1125 22:16:26.787632 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"c1d3df8e-e3e1-4065-8736-979a4abaec2c","Type":"ContainerStarted","Data":"1c2b4a27d12be81296ba04bbf7a81185b5da36734a7fa6b3ea5534b23d262925"} Nov 25 22:16:26 crc kubenswrapper[4910]: I1125 22:16:26.832474 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.598324522 podStartE2EDuration="46.832444653s" podCreationTimestamp="2025-11-25 22:15:40 +0000 UTC" firstStartedPulling="2025-11-25 22:15:42.436033789 +0000 UTC m=+2697.898510111" lastFinishedPulling="2025-11-25 22:16:24.67015391 +0000 UTC m=+2740.132630242" observedRunningTime="2025-11-25 22:16:26.819095591 +0000 UTC m=+2742.281571923" watchObservedRunningTime="2025-11-25 22:16:26.832444653 +0000 UTC m=+2742.294920985" Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.080100 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-z8qdl"] Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.083886 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.093668 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z8qdl"] Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.105854 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-catalog-content\") pod \"certified-operators-z8qdl\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.106211 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-utilities\") pod \"certified-operators-z8qdl\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.106410 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4w2n\" (UniqueName: \"kubernetes.io/projected/32954b53-0f75-4031-8751-0bfcbae1c175-kube-api-access-n4w2n\") pod \"certified-operators-z8qdl\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.207567 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-utilities\") pod \"certified-operators-z8qdl\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.207640 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4w2n\" (UniqueName: \"kubernetes.io/projected/32954b53-0f75-4031-8751-0bfcbae1c175-kube-api-access-n4w2n\") pod \"certified-operators-z8qdl\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.207758 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-catalog-content\") pod \"certified-operators-z8qdl\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.208276 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-utilities\") pod \"certified-operators-z8qdl\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.208305 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-catalog-content\") pod \"certified-operators-z8qdl\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.239459 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-n4w2n\" (UniqueName: \"kubernetes.io/projected/32954b53-0f75-4031-8751-0bfcbae1c175-kube-api-access-n4w2n\") pod \"certified-operators-z8qdl\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:16:56 crc kubenswrapper[4910]: I1125 22:16:56.412960 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:16:57 crc kubenswrapper[4910]: I1125 22:16:57.049457 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-z8qdl"] Nov 25 22:16:57 crc kubenswrapper[4910]: I1125 22:16:57.265226 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8qdl" event={"ID":"32954b53-0f75-4031-8751-0bfcbae1c175","Type":"ContainerStarted","Data":"e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb"} Nov 25 22:16:57 crc kubenswrapper[4910]: I1125 22:16:57.266587 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8qdl" event={"ID":"32954b53-0f75-4031-8751-0bfcbae1c175","Type":"ContainerStarted","Data":"4edf6625737d1ad006812238499d6b4138664ae1d487f5af42d0bcb16d8185b6"} Nov 25 22:16:58 crc kubenswrapper[4910]: I1125 22:16:58.276604 4910 generic.go:334] "Generic (PLEG): container finished" podID="32954b53-0f75-4031-8751-0bfcbae1c175" containerID="e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb" exitCode=0 Nov 25 22:16:58 crc kubenswrapper[4910]: I1125 22:16:58.276797 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8qdl" event={"ID":"32954b53-0f75-4031-8751-0bfcbae1c175","Type":"ContainerDied","Data":"e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb"} Nov 25 22:16:59 crc kubenswrapper[4910]: I1125 22:16:59.304484 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8qdl" event={"ID":"32954b53-0f75-4031-8751-0bfcbae1c175","Type":"ContainerStarted","Data":"a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b"} Nov 25 22:17:00 crc kubenswrapper[4910]: I1125 22:17:00.315096 4910 generic.go:334] "Generic (PLEG): container finished" podID="32954b53-0f75-4031-8751-0bfcbae1c175" containerID="a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b" exitCode=0 Nov 25 22:17:00 crc kubenswrapper[4910]: I1125 22:17:00.315222 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8qdl" event={"ID":"32954b53-0f75-4031-8751-0bfcbae1c175","Type":"ContainerDied","Data":"a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b"} Nov 25 22:17:01 crc kubenswrapper[4910]: I1125 22:17:01.331860 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8qdl" event={"ID":"32954b53-0f75-4031-8751-0bfcbae1c175","Type":"ContainerStarted","Data":"9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a"} Nov 25 22:17:01 crc kubenswrapper[4910]: I1125 22:17:01.366045 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-z8qdl" podStartSLOduration=2.926430769 podStartE2EDuration="5.366020076s" podCreationTimestamp="2025-11-25 22:16:56 +0000 UTC" firstStartedPulling="2025-11-25 22:16:58.278994312 +0000 UTC m=+2773.741470634" lastFinishedPulling="2025-11-25 
22:17:00.718583629 +0000 UTC m=+2776.181059941" observedRunningTime="2025-11-25 22:17:01.353009643 +0000 UTC m=+2776.815486005" watchObservedRunningTime="2025-11-25 22:17:01.366020076 +0000 UTC m=+2776.828496418" Nov 25 22:17:06 crc kubenswrapper[4910]: I1125 22:17:06.413886 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:17:06 crc kubenswrapper[4910]: I1125 22:17:06.414934 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:17:07 crc kubenswrapper[4910]: I1125 22:17:07.491886 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-z8qdl" podUID="32954b53-0f75-4031-8751-0bfcbae1c175" containerName="registry-server" probeResult="failure" output=< Nov 25 22:17:07 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Nov 25 22:17:07 crc kubenswrapper[4910]: > Nov 25 22:17:16 crc kubenswrapper[4910]: I1125 22:17:16.475673 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:17:16 crc kubenswrapper[4910]: I1125 22:17:16.534021 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:17:18 crc kubenswrapper[4910]: I1125 22:17:18.729032 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z8qdl"] Nov 25 22:17:18 crc kubenswrapper[4910]: I1125 22:17:18.730167 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-z8qdl" podUID="32954b53-0f75-4031-8751-0bfcbae1c175" containerName="registry-server" containerID="cri-o://9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a" gracePeriod=2 Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.253366 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.400071 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-utilities" (OuterVolumeSpecName: "utilities") pod "32954b53-0f75-4031-8751-0bfcbae1c175" (UID: "32954b53-0f75-4031-8751-0bfcbae1c175"). InnerVolumeSpecName "utilities". 
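[annotation] The startup-probe failure above, with output `timeout: failed to connect service ":50051" within 1s`, is the characteristic output of grpc_health_probe run against the catalog pod's registry-server, which serves its gRPC API on port 50051. A sketch of an exec probe with that shape; the command and timeout are assumed in the style of OLM catalog pods, not read from this pod's spec:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        startup := corev1.Probe{
            ProbeHandler: corev1.ProbeHandler{
                Exec: &corev1.ExecAction{
                    // Assumed command; its failure output matches the log above.
                    Command: []string{"grpc_health_probe", "-addr=:50051"},
                },
            },
            TimeoutSeconds: 1, // "within 1s" in the probe output
        }
        fmt.Printf("startup: %+v\n", startup)
    }

The sequence above shows the probe recovering on its own: "startup" flips to "started" at 22:17:16 once the registry has unpacked its catalog content, and readiness follows immediately.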
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.400199 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-utilities\") pod \"32954b53-0f75-4031-8751-0bfcbae1c175\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.400402 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n4w2n\" (UniqueName: \"kubernetes.io/projected/32954b53-0f75-4031-8751-0bfcbae1c175-kube-api-access-n4w2n\") pod \"32954b53-0f75-4031-8751-0bfcbae1c175\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.401693 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-catalog-content\") pod \"32954b53-0f75-4031-8751-0bfcbae1c175\" (UID: \"32954b53-0f75-4031-8751-0bfcbae1c175\") " Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.404218 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.412568 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/32954b53-0f75-4031-8751-0bfcbae1c175-kube-api-access-n4w2n" (OuterVolumeSpecName: "kube-api-access-n4w2n") pod "32954b53-0f75-4031-8751-0bfcbae1c175" (UID: "32954b53-0f75-4031-8751-0bfcbae1c175"). InnerVolumeSpecName "kube-api-access-n4w2n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.450658 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "32954b53-0f75-4031-8751-0bfcbae1c175" (UID: "32954b53-0f75-4031-8751-0bfcbae1c175"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.506826 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n4w2n\" (UniqueName: \"kubernetes.io/projected/32954b53-0f75-4031-8751-0bfcbae1c175-kube-api-access-n4w2n\") on node \"crc\" DevicePath \"\"" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.506873 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/32954b53-0f75-4031-8751-0bfcbae1c175-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.534684 4910 generic.go:334] "Generic (PLEG): container finished" podID="32954b53-0f75-4031-8751-0bfcbae1c175" containerID="9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a" exitCode=0 Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.534748 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8qdl" event={"ID":"32954b53-0f75-4031-8751-0bfcbae1c175","Type":"ContainerDied","Data":"9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a"} Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.534781 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-z8qdl" event={"ID":"32954b53-0f75-4031-8751-0bfcbae1c175","Type":"ContainerDied","Data":"4edf6625737d1ad006812238499d6b4138664ae1d487f5af42d0bcb16d8185b6"} Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.534822 4910 scope.go:117] "RemoveContainer" containerID="9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.534826 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-z8qdl" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.558510 4910 scope.go:117] "RemoveContainer" containerID="a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.582639 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-z8qdl"] Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.591869 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-z8qdl"] Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.622970 4910 scope.go:117] "RemoveContainer" containerID="e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.645326 4910 scope.go:117] "RemoveContainer" containerID="9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a" Nov 25 22:17:19 crc kubenswrapper[4910]: E1125 22:17:19.645895 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a\": container with ID starting with 9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a not found: ID does not exist" containerID="9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.645948 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a"} err="failed to get container status \"9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a\": rpc error: code = NotFound desc = could not find container \"9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a\": container with ID starting with 9988ce729e4a1246c24d35137f0bdea5d3f0fd91ecd8a39af510c1aec969c62a not found: ID does not exist" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.645978 4910 scope.go:117] "RemoveContainer" containerID="a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b" Nov 25 22:17:19 crc kubenswrapper[4910]: E1125 22:17:19.646333 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b\": container with ID starting with a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b not found: ID does not exist" containerID="a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.646374 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b"} err="failed to get container status \"a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b\": rpc error: code = NotFound desc = could not find container \"a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b\": container with ID starting with a12fe4524f08b5e8983188ea6f9375421c3163f3d9ca0d54b106a9daf283fc9b not found: ID does not exist" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.646400 4910 scope.go:117] "RemoveContainer" containerID="e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb" Nov 25 22:17:19 crc kubenswrapper[4910]: E1125 22:17:19.646739 4910 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb\": container with ID starting with e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb not found: ID does not exist" containerID="e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb" Nov 25 22:17:19 crc kubenswrapper[4910]: I1125 22:17:19.646768 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb"} err="failed to get container status \"e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb\": rpc error: code = NotFound desc = could not find container \"e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb\": container with ID starting with e82c720791437256293ccbeaeec9e9c009f40cb6a4893b55ccb4b414e18658fb not found: ID does not exist" Nov 25 22:17:21 crc kubenswrapper[4910]: I1125 22:17:21.215488 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="32954b53-0f75-4031-8751-0bfcbae1c175" path="/var/lib/kubelet/pods/32954b53-0f75-4031-8751-0bfcbae1c175/volumes" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.139714 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-slqpk"] Nov 25 22:17:22 crc kubenswrapper[4910]: E1125 22:17:22.140785 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32954b53-0f75-4031-8751-0bfcbae1c175" containerName="extract-content" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.140809 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="32954b53-0f75-4031-8751-0bfcbae1c175" containerName="extract-content" Nov 25 22:17:22 crc kubenswrapper[4910]: E1125 22:17:22.140880 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32954b53-0f75-4031-8751-0bfcbae1c175" containerName="extract-utilities" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.140894 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="32954b53-0f75-4031-8751-0bfcbae1c175" containerName="extract-utilities" Nov 25 22:17:22 crc kubenswrapper[4910]: E1125 22:17:22.140916 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="32954b53-0f75-4031-8751-0bfcbae1c175" containerName="registry-server" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.140927 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="32954b53-0f75-4031-8751-0bfcbae1c175" containerName="registry-server" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.141195 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="32954b53-0f75-4031-8751-0bfcbae1c175" containerName="registry-server" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.143214 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.159020 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-slqpk"] Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.167661 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-catalog-content\") pod \"redhat-operators-slqpk\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.167795 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-utilities\") pod \"redhat-operators-slqpk\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.167984 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hdx47\" (UniqueName: \"kubernetes.io/projected/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-kube-api-access-hdx47\") pod \"redhat-operators-slqpk\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.270692 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-catalog-content\") pod \"redhat-operators-slqpk\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.270818 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-utilities\") pod \"redhat-operators-slqpk\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.270894 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hdx47\" (UniqueName: \"kubernetes.io/projected/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-kube-api-access-hdx47\") pod \"redhat-operators-slqpk\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.271220 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-catalog-content\") pod \"redhat-operators-slqpk\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.271484 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-utilities\") pod \"redhat-operators-slqpk\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.304156 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-hdx47\" (UniqueName: \"kubernetes.io/projected/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-kube-api-access-hdx47\") pod \"redhat-operators-slqpk\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:22 crc kubenswrapper[4910]: I1125 22:17:22.466984 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:23 crc kubenswrapper[4910]: I1125 22:17:23.098789 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:17:23 crc kubenswrapper[4910]: I1125 22:17:23.099342 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:17:23 crc kubenswrapper[4910]: I1125 22:17:23.158928 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-slqpk"] Nov 25 22:17:23 crc kubenswrapper[4910]: I1125 22:17:23.598722 4910 generic.go:334] "Generic (PLEG): container finished" podID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerID="9a9a3de7e31c32bba08badd263fb522164f6973c359378f6ba6857f8c7a2865b" exitCode=0 Nov 25 22:17:23 crc kubenswrapper[4910]: I1125 22:17:23.599287 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-slqpk" event={"ID":"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150","Type":"ContainerDied","Data":"9a9a3de7e31c32bba08badd263fb522164f6973c359378f6ba6857f8c7a2865b"} Nov 25 22:17:23 crc kubenswrapper[4910]: I1125 22:17:23.599337 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-slqpk" event={"ID":"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150","Type":"ContainerStarted","Data":"62998ba4cf6497b051ba0b189a4068c1920df97526876c899c9a110daf857bd8"} Nov 25 22:17:24 crc kubenswrapper[4910]: I1125 22:17:24.612700 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-slqpk" event={"ID":"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150","Type":"ContainerStarted","Data":"922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372"} Nov 25 22:17:27 crc kubenswrapper[4910]: I1125 22:17:27.656608 4910 generic.go:334] "Generic (PLEG): container finished" podID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerID="922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372" exitCode=0 Nov 25 22:17:27 crc kubenswrapper[4910]: I1125 22:17:27.656745 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-slqpk" event={"ID":"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150","Type":"ContainerDied","Data":"922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372"} Nov 25 22:17:28 crc kubenswrapper[4910]: I1125 22:17:28.670619 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-slqpk" event={"ID":"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150","Type":"ContainerStarted","Data":"9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065"} Nov 25 22:17:28 crc kubenswrapper[4910]: I1125 22:17:28.700970 4910 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-slqpk" podStartSLOduration=2.008950563 podStartE2EDuration="6.700949694s" podCreationTimestamp="2025-11-25 22:17:22 +0000 UTC" firstStartedPulling="2025-11-25 22:17:23.60077423 +0000 UTC m=+2799.063250552" lastFinishedPulling="2025-11-25 22:17:28.292773321 +0000 UTC m=+2803.755249683" observedRunningTime="2025-11-25 22:17:28.692872705 +0000 UTC m=+2804.155349067" watchObservedRunningTime="2025-11-25 22:17:28.700949694 +0000 UTC m=+2804.163426016" Nov 25 22:17:32 crc kubenswrapper[4910]: I1125 22:17:32.467837 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:32 crc kubenswrapper[4910]: I1125 22:17:32.468660 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:33 crc kubenswrapper[4910]: I1125 22:17:33.544696 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-slqpk" podUID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerName="registry-server" probeResult="failure" output=< Nov 25 22:17:33 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Nov 25 22:17:33 crc kubenswrapper[4910]: > Nov 25 22:17:42 crc kubenswrapper[4910]: I1125 22:17:42.566530 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:42 crc kubenswrapper[4910]: I1125 22:17:42.644568 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:42 crc kubenswrapper[4910]: I1125 22:17:42.841548 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-slqpk"] Nov 25 22:17:43 crc kubenswrapper[4910]: I1125 22:17:43.853099 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-slqpk" podUID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerName="registry-server" containerID="cri-o://9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065" gracePeriod=2 Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.489112 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.655942 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-catalog-content\") pod \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.656329 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-utilities\") pod \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.656417 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hdx47\" (UniqueName: \"kubernetes.io/projected/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-kube-api-access-hdx47\") pod \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\" (UID: \"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150\") " Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.657705 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-utilities" (OuterVolumeSpecName: "utilities") pod "b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" (UID: "b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.668420 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-kube-api-access-hdx47" (OuterVolumeSpecName: "kube-api-access-hdx47") pod "b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" (UID: "b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150"). InnerVolumeSpecName "kube-api-access-hdx47". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.745354 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" (UID: "b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.759306 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.759347 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.759358 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hdx47\" (UniqueName: \"kubernetes.io/projected/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150-kube-api-access-hdx47\") on node \"crc\" DevicePath \"\"" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.867775 4910 generic.go:334] "Generic (PLEG): container finished" podID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerID="9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065" exitCode=0 Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.867856 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-slqpk" event={"ID":"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150","Type":"ContainerDied","Data":"9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065"} Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.867903 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-slqpk" event={"ID":"b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150","Type":"ContainerDied","Data":"62998ba4cf6497b051ba0b189a4068c1920df97526876c899c9a110daf857bd8"} Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.867940 4910 scope.go:117] "RemoveContainer" containerID="9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.868188 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-slqpk" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.907187 4910 scope.go:117] "RemoveContainer" containerID="922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.924408 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-slqpk"] Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.946152 4910 scope.go:117] "RemoveContainer" containerID="9a9a3de7e31c32bba08badd263fb522164f6973c359378f6ba6857f8c7a2865b" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.948791 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-slqpk"] Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.996427 4910 scope.go:117] "RemoveContainer" containerID="9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065" Nov 25 22:17:44 crc kubenswrapper[4910]: E1125 22:17:44.997023 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065\": container with ID starting with 9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065 not found: ID does not exist" containerID="9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.997083 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065"} err="failed to get container status \"9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065\": rpc error: code = NotFound desc = could not find container \"9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065\": container with ID starting with 9ac2748bf919d950a81ceb994c7d404dcd496a8c833b8993e184ea70fbe78065 not found: ID does not exist" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.997117 4910 scope.go:117] "RemoveContainer" containerID="922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372" Nov 25 22:17:44 crc kubenswrapper[4910]: E1125 22:17:44.997483 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372\": container with ID starting with 922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372 not found: ID does not exist" containerID="922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.997529 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372"} err="failed to get container status \"922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372\": rpc error: code = NotFound desc = could not find container \"922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372\": container with ID starting with 922f13c31946570d9cad87797e1681d43304a186b2e00d479f13876dbe912372 not found: ID does not exist" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.997563 4910 scope.go:117] "RemoveContainer" containerID="9a9a3de7e31c32bba08badd263fb522164f6973c359378f6ba6857f8c7a2865b" Nov 25 22:17:44 crc kubenswrapper[4910]: E1125 22:17:44.997844 4910 log.go:32] "ContainerStatus from runtime service failed" 
err="rpc error: code = NotFound desc = could not find container \"9a9a3de7e31c32bba08badd263fb522164f6973c359378f6ba6857f8c7a2865b\": container with ID starting with 9a9a3de7e31c32bba08badd263fb522164f6973c359378f6ba6857f8c7a2865b not found: ID does not exist" containerID="9a9a3de7e31c32bba08badd263fb522164f6973c359378f6ba6857f8c7a2865b" Nov 25 22:17:44 crc kubenswrapper[4910]: I1125 22:17:44.997880 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a9a3de7e31c32bba08badd263fb522164f6973c359378f6ba6857f8c7a2865b"} err="failed to get container status \"9a9a3de7e31c32bba08badd263fb522164f6973c359378f6ba6857f8c7a2865b\": rpc error: code = NotFound desc = could not find container \"9a9a3de7e31c32bba08badd263fb522164f6973c359378f6ba6857f8c7a2865b\": container with ID starting with 9a9a3de7e31c32bba08badd263fb522164f6973c359378f6ba6857f8c7a2865b not found: ID does not exist" Nov 25 22:17:45 crc kubenswrapper[4910]: I1125 22:17:45.219888 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" path="/var/lib/kubelet/pods/b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150/volumes" Nov 25 22:17:53 crc kubenswrapper[4910]: I1125 22:17:53.098466 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:17:53 crc kubenswrapper[4910]: I1125 22:17:53.099398 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:18:23 crc kubenswrapper[4910]: I1125 22:18:23.099049 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:18:23 crc kubenswrapper[4910]: I1125 22:18:23.099923 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:18:23 crc kubenswrapper[4910]: I1125 22:18:23.100017 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 22:18:23 crc kubenswrapper[4910]: I1125 22:18:23.101327 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 22:18:23 crc kubenswrapper[4910]: I1125 22:18:23.101434 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" 
podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" gracePeriod=600 Nov 25 22:18:23 crc kubenswrapper[4910]: E1125 22:18:23.250552 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:18:23 crc kubenswrapper[4910]: I1125 22:18:23.415166 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" exitCode=0 Nov 25 22:18:23 crc kubenswrapper[4910]: I1125 22:18:23.415234 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075"} Nov 25 22:18:23 crc kubenswrapper[4910]: I1125 22:18:23.415642 4910 scope.go:117] "RemoveContainer" containerID="104e282474eb551cd25807cad718a0cf143a497ab64e055693acd02b62657ead" Nov 25 22:18:23 crc kubenswrapper[4910]: I1125 22:18:23.417338 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:18:23 crc kubenswrapper[4910]: E1125 22:18:23.417741 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:18:36 crc kubenswrapper[4910]: I1125 22:18:36.205417 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:18:36 crc kubenswrapper[4910]: E1125 22:18:36.206758 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:18:51 crc kubenswrapper[4910]: I1125 22:18:51.206022 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:18:51 crc kubenswrapper[4910]: E1125 22:18:51.209997 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:19:05 crc kubenswrapper[4910]: I1125 22:19:05.224520 4910 scope.go:117] 
"RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:19:05 crc kubenswrapper[4910]: E1125 22:19:05.226234 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:19:18 crc kubenswrapper[4910]: I1125 22:19:18.204231 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:19:18 crc kubenswrapper[4910]: E1125 22:19:18.205614 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:19:33 crc kubenswrapper[4910]: I1125 22:19:33.204163 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:19:33 crc kubenswrapper[4910]: E1125 22:19:33.207290 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:19:44 crc kubenswrapper[4910]: I1125 22:19:44.204623 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:19:44 crc kubenswrapper[4910]: E1125 22:19:44.206971 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:19:56 crc kubenswrapper[4910]: I1125 22:19:56.205755 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:19:56 crc kubenswrapper[4910]: E1125 22:19:56.207069 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.239448 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2rgz9"] Nov 25 22:20:04 crc kubenswrapper[4910]: E1125 22:20:04.240811 4910 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerName="extract-utilities" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.240835 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerName="extract-utilities" Nov 25 22:20:04 crc kubenswrapper[4910]: E1125 22:20:04.240877 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerName="extract-content" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.240889 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerName="extract-content" Nov 25 22:20:04 crc kubenswrapper[4910]: E1125 22:20:04.240913 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerName="registry-server" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.240924 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerName="registry-server" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.241195 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="b71d4e4b-22fe-4dd5-aeb6-3cf5f0b64150" containerName="registry-server" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.243297 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.277712 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2rgz9"] Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.277941 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-catalog-content\") pod \"community-operators-2rgz9\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.278091 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-utilities\") pod \"community-operators-2rgz9\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.293838 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fplpc\" (UniqueName: \"kubernetes.io/projected/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-kube-api-access-fplpc\") pod \"community-operators-2rgz9\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.396482 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fplpc\" (UniqueName: \"kubernetes.io/projected/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-kube-api-access-fplpc\") pod \"community-operators-2rgz9\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.396558 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-catalog-content\") pod \"community-operators-2rgz9\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.396620 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-utilities\") pod \"community-operators-2rgz9\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.397336 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-utilities\") pod \"community-operators-2rgz9\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.397570 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-catalog-content\") pod \"community-operators-2rgz9\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.427416 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fplpc\" (UniqueName: \"kubernetes.io/projected/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-kube-api-access-fplpc\") pod \"community-operators-2rgz9\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:04 crc kubenswrapper[4910]: I1125 22:20:04.572732 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:05 crc kubenswrapper[4910]: I1125 22:20:05.310038 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2rgz9"] Nov 25 22:20:05 crc kubenswrapper[4910]: I1125 22:20:05.742528 4910 generic.go:334] "Generic (PLEG): container finished" podID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" containerID="39ab9a49058f2589536a82574cdea537f8edf27b9700fb0fd92b77362649e521" exitCode=0 Nov 25 22:20:05 crc kubenswrapper[4910]: I1125 22:20:05.742594 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2rgz9" event={"ID":"d02ce3f4-ee87-46d5-9754-e1caf274bcf6","Type":"ContainerDied","Data":"39ab9a49058f2589536a82574cdea537f8edf27b9700fb0fd92b77362649e521"} Nov 25 22:20:05 crc kubenswrapper[4910]: I1125 22:20:05.743114 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2rgz9" event={"ID":"d02ce3f4-ee87-46d5-9754-e1caf274bcf6","Type":"ContainerStarted","Data":"2a1ab2ee52454218de3f9e2705ba6f5acec96bb5cf062422849205565f0f5bb6"} Nov 25 22:20:06 crc kubenswrapper[4910]: I1125 22:20:06.762206 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2rgz9" event={"ID":"d02ce3f4-ee87-46d5-9754-e1caf274bcf6","Type":"ContainerStarted","Data":"6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe"} Nov 25 22:20:07 crc kubenswrapper[4910]: I1125 22:20:07.206327 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:20:07 crc kubenswrapper[4910]: E1125 22:20:07.207126 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:20:07 crc kubenswrapper[4910]: I1125 22:20:07.777836 4910 generic.go:334] "Generic (PLEG): container finished" podID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" containerID="6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe" exitCode=0 Nov 25 22:20:07 crc kubenswrapper[4910]: I1125 22:20:07.777965 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2rgz9" event={"ID":"d02ce3f4-ee87-46d5-9754-e1caf274bcf6","Type":"ContainerDied","Data":"6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe"} Nov 25 22:20:08 crc kubenswrapper[4910]: I1125 22:20:08.790414 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2rgz9" event={"ID":"d02ce3f4-ee87-46d5-9754-e1caf274bcf6","Type":"ContainerStarted","Data":"6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe"} Nov 25 22:20:08 crc kubenswrapper[4910]: I1125 22:20:08.821454 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2rgz9" podStartSLOduration=2.379643306 podStartE2EDuration="4.821436094s" podCreationTimestamp="2025-11-25 22:20:04 +0000 UTC" firstStartedPulling="2025-11-25 22:20:05.744852476 +0000 UTC m=+2961.207328808" lastFinishedPulling="2025-11-25 22:20:08.186645234 +0000 UTC m=+2963.649121596" observedRunningTime="2025-11-25 
22:20:08.819601296 +0000 UTC m=+2964.282077618" watchObservedRunningTime="2025-11-25 22:20:08.821436094 +0000 UTC m=+2964.283912416" Nov 25 22:20:14 crc kubenswrapper[4910]: I1125 22:20:14.573769 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:14 crc kubenswrapper[4910]: I1125 22:20:14.574629 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:14 crc kubenswrapper[4910]: I1125 22:20:14.656633 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:14 crc kubenswrapper[4910]: I1125 22:20:14.920062 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:14 crc kubenswrapper[4910]: I1125 22:20:14.989037 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2rgz9"] Nov 25 22:20:16 crc kubenswrapper[4910]: I1125 22:20:16.882887 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2rgz9" podUID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" containerName="registry-server" containerID="cri-o://6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe" gracePeriod=2 Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.450172 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.543580 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fplpc\" (UniqueName: \"kubernetes.io/projected/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-kube-api-access-fplpc\") pod \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.544621 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-catalog-content\") pod \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.544757 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-utilities\") pod \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\" (UID: \"d02ce3f4-ee87-46d5-9754-e1caf274bcf6\") " Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.546041 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-utilities" (OuterVolumeSpecName: "utilities") pod "d02ce3f4-ee87-46d5-9754-e1caf274bcf6" (UID: "d02ce3f4-ee87-46d5-9754-e1caf274bcf6"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.551607 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-kube-api-access-fplpc" (OuterVolumeSpecName: "kube-api-access-fplpc") pod "d02ce3f4-ee87-46d5-9754-e1caf274bcf6" (UID: "d02ce3f4-ee87-46d5-9754-e1caf274bcf6"). InnerVolumeSpecName "kube-api-access-fplpc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.606083 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d02ce3f4-ee87-46d5-9754-e1caf274bcf6" (UID: "d02ce3f4-ee87-46d5-9754-e1caf274bcf6"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.646847 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.646896 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fplpc\" (UniqueName: \"kubernetes.io/projected/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-kube-api-access-fplpc\") on node \"crc\" DevicePath \"\"" Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.646909 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d02ce3f4-ee87-46d5-9754-e1caf274bcf6-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.897415 4910 generic.go:334] "Generic (PLEG): container finished" podID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" containerID="6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe" exitCode=0 Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.897497 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2rgz9" event={"ID":"d02ce3f4-ee87-46d5-9754-e1caf274bcf6","Type":"ContainerDied","Data":"6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe"} Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.897503 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2rgz9" Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.897552 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2rgz9" event={"ID":"d02ce3f4-ee87-46d5-9754-e1caf274bcf6","Type":"ContainerDied","Data":"2a1ab2ee52454218de3f9e2705ba6f5acec96bb5cf062422849205565f0f5bb6"} Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.897583 4910 scope.go:117] "RemoveContainer" containerID="6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe" Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.935622 4910 scope.go:117] "RemoveContainer" containerID="6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe" Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.974328 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2rgz9"] Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.988819 4910 scope.go:117] "RemoveContainer" containerID="39ab9a49058f2589536a82574cdea537f8edf27b9700fb0fd92b77362649e521" Nov 25 22:20:17 crc kubenswrapper[4910]: I1125 22:20:17.991620 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2rgz9"] Nov 25 22:20:18 crc kubenswrapper[4910]: I1125 22:20:18.041557 4910 scope.go:117] "RemoveContainer" containerID="6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe" Nov 25 22:20:18 crc kubenswrapper[4910]: E1125 22:20:18.042200 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe\": container with ID starting with 6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe not found: ID does not exist" containerID="6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe" Nov 25 22:20:18 crc kubenswrapper[4910]: I1125 22:20:18.042260 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe"} err="failed to get container status \"6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe\": rpc error: code = NotFound desc = could not find container \"6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe\": container with ID starting with 6265dad795338df4e0e64fbef79d66ade6c9ab328cbcec895b6647c6068c9afe not found: ID does not exist" Nov 25 22:20:18 crc kubenswrapper[4910]: I1125 22:20:18.042287 4910 scope.go:117] "RemoveContainer" containerID="6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe" Nov 25 22:20:18 crc kubenswrapper[4910]: E1125 22:20:18.042819 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe\": container with ID starting with 6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe not found: ID does not exist" containerID="6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe" Nov 25 22:20:18 crc kubenswrapper[4910]: I1125 22:20:18.042879 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe"} err="failed to get container status \"6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe\": rpc error: code = NotFound desc = could not find 
container \"6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe\": container with ID starting with 6e7ea3972d7cf9cfe16b81325fc01e840b7befa4c326ca1d154c0a1e9c8db5fe not found: ID does not exist" Nov 25 22:20:18 crc kubenswrapper[4910]: I1125 22:20:18.042919 4910 scope.go:117] "RemoveContainer" containerID="39ab9a49058f2589536a82574cdea537f8edf27b9700fb0fd92b77362649e521" Nov 25 22:20:18 crc kubenswrapper[4910]: E1125 22:20:18.043438 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"39ab9a49058f2589536a82574cdea537f8edf27b9700fb0fd92b77362649e521\": container with ID starting with 39ab9a49058f2589536a82574cdea537f8edf27b9700fb0fd92b77362649e521 not found: ID does not exist" containerID="39ab9a49058f2589536a82574cdea537f8edf27b9700fb0fd92b77362649e521" Nov 25 22:20:18 crc kubenswrapper[4910]: I1125 22:20:18.043472 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"39ab9a49058f2589536a82574cdea537f8edf27b9700fb0fd92b77362649e521"} err="failed to get container status \"39ab9a49058f2589536a82574cdea537f8edf27b9700fb0fd92b77362649e521\": rpc error: code = NotFound desc = could not find container \"39ab9a49058f2589536a82574cdea537f8edf27b9700fb0fd92b77362649e521\": container with ID starting with 39ab9a49058f2589536a82574cdea537f8edf27b9700fb0fd92b77362649e521 not found: ID does not exist" Nov 25 22:20:19 crc kubenswrapper[4910]: I1125 22:20:19.229814 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" path="/var/lib/kubelet/pods/d02ce3f4-ee87-46d5-9754-e1caf274bcf6/volumes" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.205000 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:20:20 crc kubenswrapper[4910]: E1125 22:20:20.205740 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.383776 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2bpjk"] Nov 25 22:20:20 crc kubenswrapper[4910]: E1125 22:20:20.384754 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" containerName="extract-utilities" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.385184 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" containerName="extract-utilities" Nov 25 22:20:20 crc kubenswrapper[4910]: E1125 22:20:20.385231 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" containerName="registry-server" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.385272 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" containerName="registry-server" Nov 25 22:20:20 crc kubenswrapper[4910]: E1125 22:20:20.385319 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" containerName="extract-content" Nov 25 22:20:20 
crc kubenswrapper[4910]: I1125 22:20:20.385334 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" containerName="extract-content" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.385778 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d02ce3f4-ee87-46d5-9754-e1caf274bcf6" containerName="registry-server" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.389200 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.398405 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bpjk"] Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.523565 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jf9qg\" (UniqueName: \"kubernetes.io/projected/d613d783-08e1-4092-bde7-650a5b2a3431-kube-api-access-jf9qg\") pod \"redhat-marketplace-2bpjk\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.523710 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-catalog-content\") pod \"redhat-marketplace-2bpjk\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.523812 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-utilities\") pod \"redhat-marketplace-2bpjk\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.625887 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jf9qg\" (UniqueName: \"kubernetes.io/projected/d613d783-08e1-4092-bde7-650a5b2a3431-kube-api-access-jf9qg\") pod \"redhat-marketplace-2bpjk\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.626010 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-catalog-content\") pod \"redhat-marketplace-2bpjk\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.626081 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-utilities\") pod \"redhat-marketplace-2bpjk\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.626767 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-catalog-content\") pod \"redhat-marketplace-2bpjk\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " 
pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.626792 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-utilities\") pod \"redhat-marketplace-2bpjk\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.651122 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jf9qg\" (UniqueName: \"kubernetes.io/projected/d613d783-08e1-4092-bde7-650a5b2a3431-kube-api-access-jf9qg\") pod \"redhat-marketplace-2bpjk\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:20 crc kubenswrapper[4910]: I1125 22:20:20.713655 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:21 crc kubenswrapper[4910]: I1125 22:20:21.281575 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bpjk"] Nov 25 22:20:21 crc kubenswrapper[4910]: I1125 22:20:21.957171 4910 generic.go:334] "Generic (PLEG): container finished" podID="d613d783-08e1-4092-bde7-650a5b2a3431" containerID="54b06eef8c540e3c08a80996650a358ba9da91b99e6e1a6fbc615fc50d83c1e6" exitCode=0 Nov 25 22:20:21 crc kubenswrapper[4910]: I1125 22:20:21.957228 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bpjk" event={"ID":"d613d783-08e1-4092-bde7-650a5b2a3431","Type":"ContainerDied","Data":"54b06eef8c540e3c08a80996650a358ba9da91b99e6e1a6fbc615fc50d83c1e6"} Nov 25 22:20:21 crc kubenswrapper[4910]: I1125 22:20:21.957273 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bpjk" event={"ID":"d613d783-08e1-4092-bde7-650a5b2a3431","Type":"ContainerStarted","Data":"116fa0ce436decab3fb6656d71fd5441b620c3d5c981ad0aa9a899d672bc8828"} Nov 25 22:20:22 crc kubenswrapper[4910]: I1125 22:20:22.974347 4910 generic.go:334] "Generic (PLEG): container finished" podID="d613d783-08e1-4092-bde7-650a5b2a3431" containerID="28afe3723d033abf31d07805973d75ffa334c7ddfbe99e016668bd6de68df753" exitCode=0 Nov 25 22:20:22 crc kubenswrapper[4910]: I1125 22:20:22.974448 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bpjk" event={"ID":"d613d783-08e1-4092-bde7-650a5b2a3431","Type":"ContainerDied","Data":"28afe3723d033abf31d07805973d75ffa334c7ddfbe99e016668bd6de68df753"} Nov 25 22:20:24 crc kubenswrapper[4910]: I1125 22:20:24.004834 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bpjk" event={"ID":"d613d783-08e1-4092-bde7-650a5b2a3431","Type":"ContainerStarted","Data":"d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f"} Nov 25 22:20:24 crc kubenswrapper[4910]: I1125 22:20:24.033948 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2bpjk" podStartSLOduration=2.612175671 podStartE2EDuration="4.033926025s" podCreationTimestamp="2025-11-25 22:20:20 +0000 UTC" firstStartedPulling="2025-11-25 22:20:21.960911672 +0000 UTC m=+2977.423388024" lastFinishedPulling="2025-11-25 22:20:23.382662056 +0000 UTC m=+2978.845138378" observedRunningTime="2025-11-25 22:20:24.031728657 +0000 UTC m=+2979.494204989" 
watchObservedRunningTime="2025-11-25 22:20:24.033926025 +0000 UTC m=+2979.496402357" Nov 25 22:20:30 crc kubenswrapper[4910]: I1125 22:20:30.714273 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:30 crc kubenswrapper[4910]: I1125 22:20:30.715177 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:30 crc kubenswrapper[4910]: I1125 22:20:30.785789 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:31 crc kubenswrapper[4910]: I1125 22:20:31.135919 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:31 crc kubenswrapper[4910]: I1125 22:20:31.221739 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bpjk"] Nov 25 22:20:33 crc kubenswrapper[4910]: I1125 22:20:33.100823 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2bpjk" podUID="d613d783-08e1-4092-bde7-650a5b2a3431" containerName="registry-server" containerID="cri-o://d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f" gracePeriod=2 Nov 25 22:20:33 crc kubenswrapper[4910]: I1125 22:20:33.691565 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:33 crc kubenswrapper[4910]: I1125 22:20:33.767528 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-catalog-content\") pod \"d613d783-08e1-4092-bde7-650a5b2a3431\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " Nov 25 22:20:33 crc kubenswrapper[4910]: I1125 22:20:33.767810 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-utilities\") pod \"d613d783-08e1-4092-bde7-650a5b2a3431\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " Nov 25 22:20:33 crc kubenswrapper[4910]: I1125 22:20:33.768047 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jf9qg\" (UniqueName: \"kubernetes.io/projected/d613d783-08e1-4092-bde7-650a5b2a3431-kube-api-access-jf9qg\") pod \"d613d783-08e1-4092-bde7-650a5b2a3431\" (UID: \"d613d783-08e1-4092-bde7-650a5b2a3431\") " Nov 25 22:20:33 crc kubenswrapper[4910]: I1125 22:20:33.768991 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-utilities" (OuterVolumeSpecName: "utilities") pod "d613d783-08e1-4092-bde7-650a5b2a3431" (UID: "d613d783-08e1-4092-bde7-650a5b2a3431"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:20:33 crc kubenswrapper[4910]: I1125 22:20:33.777819 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d613d783-08e1-4092-bde7-650a5b2a3431-kube-api-access-jf9qg" (OuterVolumeSpecName: "kube-api-access-jf9qg") pod "d613d783-08e1-4092-bde7-650a5b2a3431" (UID: "d613d783-08e1-4092-bde7-650a5b2a3431"). InnerVolumeSpecName "kube-api-access-jf9qg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:20:33 crc kubenswrapper[4910]: I1125 22:20:33.784855 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d613d783-08e1-4092-bde7-650a5b2a3431" (UID: "d613d783-08e1-4092-bde7-650a5b2a3431"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:20:33 crc kubenswrapper[4910]: I1125 22:20:33.871027 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:20:33 crc kubenswrapper[4910]: I1125 22:20:33.871091 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d613d783-08e1-4092-bde7-650a5b2a3431-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:20:33 crc kubenswrapper[4910]: I1125 22:20:33.871101 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jf9qg\" (UniqueName: \"kubernetes.io/projected/d613d783-08e1-4092-bde7-650a5b2a3431-kube-api-access-jf9qg\") on node \"crc\" DevicePath \"\"" Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.117562 4910 generic.go:334] "Generic (PLEG): container finished" podID="d613d783-08e1-4092-bde7-650a5b2a3431" containerID="d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f" exitCode=0 Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.117634 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bpjk" event={"ID":"d613d783-08e1-4092-bde7-650a5b2a3431","Type":"ContainerDied","Data":"d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f"} Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.117680 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2bpjk" event={"ID":"d613d783-08e1-4092-bde7-650a5b2a3431","Type":"ContainerDied","Data":"116fa0ce436decab3fb6656d71fd5441b620c3d5c981ad0aa9a899d672bc8828"} Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.117707 4910 scope.go:117] "RemoveContainer" containerID="d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f" Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.117942 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2bpjk" Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.150677 4910 scope.go:117] "RemoveContainer" containerID="28afe3723d033abf31d07805973d75ffa334c7ddfbe99e016668bd6de68df753" Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.163548 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bpjk"] Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.175379 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2bpjk"] Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.183139 4910 scope.go:117] "RemoveContainer" containerID="54b06eef8c540e3c08a80996650a358ba9da91b99e6e1a6fbc615fc50d83c1e6" Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.211949 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:20:34 crc kubenswrapper[4910]: E1125 22:20:34.212310 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.220451 4910 scope.go:117] "RemoveContainer" containerID="d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f" Nov 25 22:20:34 crc kubenswrapper[4910]: E1125 22:20:34.221013 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f\": container with ID starting with d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f not found: ID does not exist" containerID="d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f" Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.221126 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f"} err="failed to get container status \"d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f\": rpc error: code = NotFound desc = could not find container \"d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f\": container with ID starting with d4e3020435640e75e0551d5cef7a5ff0a7aca8f383ba6b6166cdbf82b542e91f not found: ID does not exist" Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.221227 4910 scope.go:117] "RemoveContainer" containerID="28afe3723d033abf31d07805973d75ffa334c7ddfbe99e016668bd6de68df753" Nov 25 22:20:34 crc kubenswrapper[4910]: E1125 22:20:34.221681 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28afe3723d033abf31d07805973d75ffa334c7ddfbe99e016668bd6de68df753\": container with ID starting with 28afe3723d033abf31d07805973d75ffa334c7ddfbe99e016668bd6de68df753 not found: ID does not exist" containerID="28afe3723d033abf31d07805973d75ffa334c7ddfbe99e016668bd6de68df753" Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.221751 4910 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"28afe3723d033abf31d07805973d75ffa334c7ddfbe99e016668bd6de68df753"} err="failed to get container status \"28afe3723d033abf31d07805973d75ffa334c7ddfbe99e016668bd6de68df753\": rpc error: code = NotFound desc = could not find container \"28afe3723d033abf31d07805973d75ffa334c7ddfbe99e016668bd6de68df753\": container with ID starting with 28afe3723d033abf31d07805973d75ffa334c7ddfbe99e016668bd6de68df753 not found: ID does not exist" Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.221791 4910 scope.go:117] "RemoveContainer" containerID="54b06eef8c540e3c08a80996650a358ba9da91b99e6e1a6fbc615fc50d83c1e6" Nov 25 22:20:34 crc kubenswrapper[4910]: E1125 22:20:34.222574 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54b06eef8c540e3c08a80996650a358ba9da91b99e6e1a6fbc615fc50d83c1e6\": container with ID starting with 54b06eef8c540e3c08a80996650a358ba9da91b99e6e1a6fbc615fc50d83c1e6 not found: ID does not exist" containerID="54b06eef8c540e3c08a80996650a358ba9da91b99e6e1a6fbc615fc50d83c1e6" Nov 25 22:20:34 crc kubenswrapper[4910]: I1125 22:20:34.222633 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54b06eef8c540e3c08a80996650a358ba9da91b99e6e1a6fbc615fc50d83c1e6"} err="failed to get container status \"54b06eef8c540e3c08a80996650a358ba9da91b99e6e1a6fbc615fc50d83c1e6\": rpc error: code = NotFound desc = could not find container \"54b06eef8c540e3c08a80996650a358ba9da91b99e6e1a6fbc615fc50d83c1e6\": container with ID starting with 54b06eef8c540e3c08a80996650a358ba9da91b99e6e1a6fbc615fc50d83c1e6 not found: ID does not exist" Nov 25 22:20:35 crc kubenswrapper[4910]: I1125 22:20:35.219507 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d613d783-08e1-4092-bde7-650a5b2a3431" path="/var/lib/kubelet/pods/d613d783-08e1-4092-bde7-650a5b2a3431/volumes" Nov 25 22:20:48 crc kubenswrapper[4910]: I1125 22:20:48.204543 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:20:48 crc kubenswrapper[4910]: E1125 22:20:48.205658 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:21:03 crc kubenswrapper[4910]: I1125 22:21:03.206494 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:21:03 crc kubenswrapper[4910]: E1125 22:21:03.207776 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:21:16 crc kubenswrapper[4910]: I1125 22:21:16.205152 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:21:16 crc kubenswrapper[4910]: E1125 22:21:16.206287 4910 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:21:27 crc kubenswrapper[4910]: I1125 22:21:27.206303 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:21:27 crc kubenswrapper[4910]: E1125 22:21:27.208234 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:21:42 crc kubenswrapper[4910]: I1125 22:21:42.204064 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:21:42 crc kubenswrapper[4910]: E1125 22:21:42.205124 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:21:56 crc kubenswrapper[4910]: I1125 22:21:56.204462 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:21:56 crc kubenswrapper[4910]: E1125 22:21:56.206143 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:22:07 crc kubenswrapper[4910]: I1125 22:22:07.206508 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:22:07 crc kubenswrapper[4910]: E1125 22:22:07.207509 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:22:18 crc kubenswrapper[4910]: I1125 22:22:18.205329 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:22:18 crc kubenswrapper[4910]: E1125 22:22:18.206924 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:22:32 crc kubenswrapper[4910]: I1125 22:22:32.205478 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:22:32 crc kubenswrapper[4910]: E1125 22:22:32.206759 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:22:46 crc kubenswrapper[4910]: I1125 22:22:46.204347 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:22:46 crc kubenswrapper[4910]: E1125 22:22:46.205838 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:22:59 crc kubenswrapper[4910]: I1125 22:22:59.204526 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:22:59 crc kubenswrapper[4910]: E1125 22:22:59.205396 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:23:13 crc kubenswrapper[4910]: I1125 22:23:13.204509 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:23:13 crc kubenswrapper[4910]: E1125 22:23:13.205610 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:23:24 crc kubenswrapper[4910]: I1125 22:23:24.204752 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:23:24 crc kubenswrapper[4910]: I1125 22:23:24.576946 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"d7555b1701d6f687e8b7dd04e3ebbef12d03cad5c55fe5bf701637ff04e042d9"} Nov 25 22:25:53 crc kubenswrapper[4910]: I1125 22:25:53.099654 4910 patch_prober.go:28] 
interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:25:53 crc kubenswrapper[4910]: I1125 22:25:53.100627 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:26:23 crc kubenswrapper[4910]: I1125 22:26:23.099411 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:26:23 crc kubenswrapper[4910]: I1125 22:26:23.100560 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:26:53 crc kubenswrapper[4910]: I1125 22:26:53.099159 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:26:53 crc kubenswrapper[4910]: I1125 22:26:53.100040 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:26:53 crc kubenswrapper[4910]: I1125 22:26:53.100159 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 22:26:53 crc kubenswrapper[4910]: I1125 22:26:53.101439 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d7555b1701d6f687e8b7dd04e3ebbef12d03cad5c55fe5bf701637ff04e042d9"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 22:26:53 crc kubenswrapper[4910]: I1125 22:26:53.101545 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://d7555b1701d6f687e8b7dd04e3ebbef12d03cad5c55fe5bf701637ff04e042d9" gracePeriod=600 Nov 25 22:26:53 crc kubenswrapper[4910]: I1125 22:26:53.446629 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="d7555b1701d6f687e8b7dd04e3ebbef12d03cad5c55fe5bf701637ff04e042d9" exitCode=0 Nov 25 22:26:53 crc kubenswrapper[4910]: I1125 22:26:53.446798 4910 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"d7555b1701d6f687e8b7dd04e3ebbef12d03cad5c55fe5bf701637ff04e042d9"} Nov 25 22:26:53 crc kubenswrapper[4910]: I1125 22:26:53.447026 4910 scope.go:117] "RemoveContainer" containerID="eaa1cbc1cbc869e5957bac21396cd466da1f7fea6b626a6ede71c1e3089be075" Nov 25 22:26:54 crc kubenswrapper[4910]: I1125 22:26:54.465617 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12"} Nov 25 22:27:31 crc kubenswrapper[4910]: I1125 22:27:31.903463 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-94j8s"] Nov 25 22:27:31 crc kubenswrapper[4910]: E1125 22:27:31.907105 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d613d783-08e1-4092-bde7-650a5b2a3431" containerName="extract-content" Nov 25 22:27:31 crc kubenswrapper[4910]: I1125 22:27:31.907135 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d613d783-08e1-4092-bde7-650a5b2a3431" containerName="extract-content" Nov 25 22:27:31 crc kubenswrapper[4910]: E1125 22:27:31.907177 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d613d783-08e1-4092-bde7-650a5b2a3431" containerName="extract-utilities" Nov 25 22:27:31 crc kubenswrapper[4910]: I1125 22:27:31.907186 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d613d783-08e1-4092-bde7-650a5b2a3431" containerName="extract-utilities" Nov 25 22:27:31 crc kubenswrapper[4910]: E1125 22:27:31.907215 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d613d783-08e1-4092-bde7-650a5b2a3431" containerName="registry-server" Nov 25 22:27:31 crc kubenswrapper[4910]: I1125 22:27:31.907223 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d613d783-08e1-4092-bde7-650a5b2a3431" containerName="registry-server" Nov 25 22:27:31 crc kubenswrapper[4910]: I1125 22:27:31.907527 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d613d783-08e1-4092-bde7-650a5b2a3431" containerName="registry-server" Nov 25 22:27:31 crc kubenswrapper[4910]: I1125 22:27:31.909363 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:31 crc kubenswrapper[4910]: I1125 22:27:31.927703 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-94j8s"] Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.046718 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldgdw\" (UniqueName: \"kubernetes.io/projected/c4ff820e-8002-4ef2-aec8-296b6ec41494-kube-api-access-ldgdw\") pod \"redhat-operators-94j8s\" (UID: \"c4ff820e-8002-4ef2-aec8-296b6ec41494\") " pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.046792 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4ff820e-8002-4ef2-aec8-296b6ec41494-utilities\") pod \"redhat-operators-94j8s\" (UID: \"c4ff820e-8002-4ef2-aec8-296b6ec41494\") " pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.046825 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4ff820e-8002-4ef2-aec8-296b6ec41494-catalog-content\") pod \"redhat-operators-94j8s\" (UID: \"c4ff820e-8002-4ef2-aec8-296b6ec41494\") " pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.148602 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldgdw\" (UniqueName: \"kubernetes.io/projected/c4ff820e-8002-4ef2-aec8-296b6ec41494-kube-api-access-ldgdw\") pod \"redhat-operators-94j8s\" (UID: \"c4ff820e-8002-4ef2-aec8-296b6ec41494\") " pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.148905 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4ff820e-8002-4ef2-aec8-296b6ec41494-utilities\") pod \"redhat-operators-94j8s\" (UID: \"c4ff820e-8002-4ef2-aec8-296b6ec41494\") " pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.148942 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4ff820e-8002-4ef2-aec8-296b6ec41494-catalog-content\") pod \"redhat-operators-94j8s\" (UID: \"c4ff820e-8002-4ef2-aec8-296b6ec41494\") " pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.149474 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c4ff820e-8002-4ef2-aec8-296b6ec41494-catalog-content\") pod \"redhat-operators-94j8s\" (UID: \"c4ff820e-8002-4ef2-aec8-296b6ec41494\") " pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.149995 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c4ff820e-8002-4ef2-aec8-296b6ec41494-utilities\") pod \"redhat-operators-94j8s\" (UID: \"c4ff820e-8002-4ef2-aec8-296b6ec41494\") " pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.171037 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-ldgdw\" (UniqueName: \"kubernetes.io/projected/c4ff820e-8002-4ef2-aec8-296b6ec41494-kube-api-access-ldgdw\") pod \"redhat-operators-94j8s\" (UID: \"c4ff820e-8002-4ef2-aec8-296b6ec41494\") " pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.232502 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.771169 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-94j8s"] Nov 25 22:27:32 crc kubenswrapper[4910]: I1125 22:27:32.920269 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94j8s" event={"ID":"c4ff820e-8002-4ef2-aec8-296b6ec41494","Type":"ContainerStarted","Data":"6f7faabc77c57944f8e326a6da57f7d818b2f8e688c11e74118798d7329bcbe8"} Nov 25 22:27:33 crc kubenswrapper[4910]: I1125 22:27:33.933086 4910 generic.go:334] "Generic (PLEG): container finished" podID="c4ff820e-8002-4ef2-aec8-296b6ec41494" containerID="0d794f910ef0d88e7cc04fc6313f9755ca115df7b7d3d8544c5745818484f892" exitCode=0 Nov 25 22:27:33 crc kubenswrapper[4910]: I1125 22:27:33.933198 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94j8s" event={"ID":"c4ff820e-8002-4ef2-aec8-296b6ec41494","Type":"ContainerDied","Data":"0d794f910ef0d88e7cc04fc6313f9755ca115df7b7d3d8544c5745818484f892"} Nov 25 22:27:33 crc kubenswrapper[4910]: I1125 22:27:33.936183 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 22:27:44 crc kubenswrapper[4910]: I1125 22:27:44.077121 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94j8s" event={"ID":"c4ff820e-8002-4ef2-aec8-296b6ec41494","Type":"ContainerStarted","Data":"beec67f347f31f7c4943c22f0ef5e30d5677ce6b0a5f9c51b94c0453c7859d48"} Nov 25 22:27:45 crc kubenswrapper[4910]: I1125 22:27:45.092514 4910 generic.go:334] "Generic (PLEG): container finished" podID="c4ff820e-8002-4ef2-aec8-296b6ec41494" containerID="beec67f347f31f7c4943c22f0ef5e30d5677ce6b0a5f9c51b94c0453c7859d48" exitCode=0 Nov 25 22:27:45 crc kubenswrapper[4910]: I1125 22:27:45.092580 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94j8s" event={"ID":"c4ff820e-8002-4ef2-aec8-296b6ec41494","Type":"ContainerDied","Data":"beec67f347f31f7c4943c22f0ef5e30d5677ce6b0a5f9c51b94c0453c7859d48"} Nov 25 22:27:47 crc kubenswrapper[4910]: I1125 22:27:47.121316 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-94j8s" event={"ID":"c4ff820e-8002-4ef2-aec8-296b6ec41494","Type":"ContainerStarted","Data":"de4eed191dddb4be4be915c6845aa6abb12448beaf1b55a00c69c8e6b0fbcc19"} Nov 25 22:27:47 crc kubenswrapper[4910]: I1125 22:27:47.163155 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-94j8s" podStartSLOduration=4.4752044 podStartE2EDuration="16.163125487s" podCreationTimestamp="2025-11-25 22:27:31 +0000 UTC" firstStartedPulling="2025-11-25 22:27:33.935833304 +0000 UTC m=+3409.398309646" lastFinishedPulling="2025-11-25 22:27:45.623754401 +0000 UTC m=+3421.086230733" observedRunningTime="2025-11-25 22:27:47.151128988 +0000 UTC m=+3422.613605310" watchObservedRunningTime="2025-11-25 22:27:47.163125487 +0000 UTC m=+3422.625601849" Nov 25 22:27:52 crc 
kubenswrapper[4910]: I1125 22:27:52.234063 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:52 crc kubenswrapper[4910]: I1125 22:27:52.235536 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:27:53 crc kubenswrapper[4910]: I1125 22:27:53.312925 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-94j8s" podUID="c4ff820e-8002-4ef2-aec8-296b6ec41494" containerName="registry-server" probeResult="failure" output=< Nov 25 22:27:53 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Nov 25 22:27:53 crc kubenswrapper[4910]: > Nov 25 22:28:02 crc kubenswrapper[4910]: I1125 22:28:02.320194 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:28:02 crc kubenswrapper[4910]: I1125 22:28:02.389452 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-94j8s" Nov 25 22:28:02 crc kubenswrapper[4910]: I1125 22:28:02.932331 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-94j8s"] Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.098997 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7h25w"] Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.101200 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-7h25w" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerName="registry-server" containerID="cri-o://66338e27ae38d6cedb17471f216d57fd2c4a666cbb08f01df6e8b7fa007043b2" gracePeriod=2 Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.312997 4910 generic.go:334] "Generic (PLEG): container finished" podID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerID="66338e27ae38d6cedb17471f216d57fd2c4a666cbb08f01df6e8b7fa007043b2" exitCode=0 Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.313102 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7h25w" event={"ID":"bd3392ac-5439-4adc-8e8c-1378d37225f3","Type":"ContainerDied","Data":"66338e27ae38d6cedb17471f216d57fd2c4a666cbb08f01df6e8b7fa007043b2"} Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.676705 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.812701 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwblf\" (UniqueName: \"kubernetes.io/projected/bd3392ac-5439-4adc-8e8c-1378d37225f3-kube-api-access-zwblf\") pod \"bd3392ac-5439-4adc-8e8c-1378d37225f3\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.812891 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-utilities\") pod \"bd3392ac-5439-4adc-8e8c-1378d37225f3\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.813048 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-catalog-content\") pod \"bd3392ac-5439-4adc-8e8c-1378d37225f3\" (UID: \"bd3392ac-5439-4adc-8e8c-1378d37225f3\") " Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.813592 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-utilities" (OuterVolumeSpecName: "utilities") pod "bd3392ac-5439-4adc-8e8c-1378d37225f3" (UID: "bd3392ac-5439-4adc-8e8c-1378d37225f3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.820574 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd3392ac-5439-4adc-8e8c-1378d37225f3-kube-api-access-zwblf" (OuterVolumeSpecName: "kube-api-access-zwblf") pod "bd3392ac-5439-4adc-8e8c-1378d37225f3" (UID: "bd3392ac-5439-4adc-8e8c-1378d37225f3"). InnerVolumeSpecName "kube-api-access-zwblf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.905019 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bd3392ac-5439-4adc-8e8c-1378d37225f3" (UID: "bd3392ac-5439-4adc-8e8c-1378d37225f3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.915740 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwblf\" (UniqueName: \"kubernetes.io/projected/bd3392ac-5439-4adc-8e8c-1378d37225f3-kube-api-access-zwblf\") on node \"crc\" DevicePath \"\"" Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.915776 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:28:03 crc kubenswrapper[4910]: I1125 22:28:03.915787 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd3392ac-5439-4adc-8e8c-1378d37225f3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:28:04 crc kubenswrapper[4910]: I1125 22:28:04.329809 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-7h25w" Nov 25 22:28:04 crc kubenswrapper[4910]: I1125 22:28:04.331895 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-7h25w" event={"ID":"bd3392ac-5439-4adc-8e8c-1378d37225f3","Type":"ContainerDied","Data":"f5f5d2d0380a37628f47381f10c43d66c25b003dc90531a257442a1a6252a781"} Nov 25 22:28:04 crc kubenswrapper[4910]: I1125 22:28:04.331992 4910 scope.go:117] "RemoveContainer" containerID="66338e27ae38d6cedb17471f216d57fd2c4a666cbb08f01df6e8b7fa007043b2" Nov 25 22:28:04 crc kubenswrapper[4910]: I1125 22:28:04.366962 4910 scope.go:117] "RemoveContainer" containerID="0c726b694051c5e9eefb1b68d8015468187213d31e92683bcf6d9f431a1c3c9b" Nov 25 22:28:04 crc kubenswrapper[4910]: I1125 22:28:04.373529 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-7h25w"] Nov 25 22:28:04 crc kubenswrapper[4910]: I1125 22:28:04.394854 4910 scope.go:117] "RemoveContainer" containerID="f60ac7ca38b33af73afe8d247e08f3bc80f11eccadb1f358b0d6ae1884411e99" Nov 25 22:28:04 crc kubenswrapper[4910]: I1125 22:28:04.395935 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-7h25w"] Nov 25 22:28:05 crc kubenswrapper[4910]: I1125 22:28:05.239756 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" path="/var/lib/kubelet/pods/bd3392ac-5439-4adc-8e8c-1378d37225f3/volumes" Nov 25 22:28:53 crc kubenswrapper[4910]: I1125 22:28:53.099404 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:28:53 crc kubenswrapper[4910]: I1125 22:28:53.100239 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:29:22 crc kubenswrapper[4910]: I1125 22:29:22.375415 4910 generic.go:334] "Generic (PLEG): container finished" podID="c1d3df8e-e3e1-4065-8736-979a4abaec2c" containerID="1c2b4a27d12be81296ba04bbf7a81185b5da36734a7fa6b3ea5534b23d262925" exitCode=0 Nov 25 22:29:22 crc kubenswrapper[4910]: I1125 22:29:22.375603 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"c1d3df8e-e3e1-4065-8736-979a4abaec2c","Type":"ContainerDied","Data":"1c2b4a27d12be81296ba04bbf7a81185b5da36734a7fa6b3ea5534b23d262925"} Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.099743 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.100453 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: 
connect: connection refused" Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.791864 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.900672 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-config-data\") pod \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.900736 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5zflq\" (UniqueName: \"kubernetes.io/projected/c1d3df8e-e3e1-4065-8736-979a4abaec2c-kube-api-access-5zflq\") pod \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.900786 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config\") pod \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.900846 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config-secret\") pod \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.900872 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-workdir\") pod \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.900950 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-temporary\") pod \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.901001 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ca-certs\") pod \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.901029 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ssh-key\") pod \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.901125 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\" (UID: \"c1d3df8e-e3e1-4065-8736-979a4abaec2c\") " Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.902061 4910 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-config-data" (OuterVolumeSpecName: "config-data") pod "c1d3df8e-e3e1-4065-8736-979a4abaec2c" (UID: "c1d3df8e-e3e1-4065-8736-979a4abaec2c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.902651 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-temporary" (OuterVolumeSpecName: "test-operator-ephemeral-temporary") pod "c1d3df8e-e3e1-4065-8736-979a4abaec2c" (UID: "c1d3df8e-e3e1-4065-8736-979a4abaec2c"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.912553 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1d3df8e-e3e1-4065-8736-979a4abaec2c-kube-api-access-5zflq" (OuterVolumeSpecName: "kube-api-access-5zflq") pod "c1d3df8e-e3e1-4065-8736-979a4abaec2c" (UID: "c1d3df8e-e3e1-4065-8736-979a4abaec2c"). InnerVolumeSpecName "kube-api-access-5zflq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.914910 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "test-operator-logs") pod "c1d3df8e-e3e1-4065-8736-979a4abaec2c" (UID: "c1d3df8e-e3e1-4065-8736-979a4abaec2c"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.915709 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "c1d3df8e-e3e1-4065-8736-979a4abaec2c" (UID: "c1d3df8e-e3e1-4065-8736-979a4abaec2c"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.943973 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "c1d3df8e-e3e1-4065-8736-979a4abaec2c" (UID: "c1d3df8e-e3e1-4065-8736-979a4abaec2c"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.944024 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "c1d3df8e-e3e1-4065-8736-979a4abaec2c" (UID: "c1d3df8e-e3e1-4065-8736-979a4abaec2c"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.945391 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "c1d3df8e-e3e1-4065-8736-979a4abaec2c" (UID: "c1d3df8e-e3e1-4065-8736-979a4abaec2c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 22:29:23 crc kubenswrapper[4910]: I1125 22:29:23.983863 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "c1d3df8e-e3e1-4065-8736-979a4abaec2c" (UID: "c1d3df8e-e3e1-4065-8736-979a4abaec2c"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.003525 4910 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" "
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.014027 4910 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-config-data\") on node \"crc\" DevicePath \"\""
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.014235 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5zflq\" (UniqueName: \"kubernetes.io/projected/c1d3df8e-e3e1-4065-8736-979a4abaec2c-kube-api-access-5zflq\") on node \"crc\" DevicePath \"\""
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.014261 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config\") on node \"crc\" DevicePath \"\""
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.014274 4910 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.014287 4910 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\""
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.014298 4910 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/c1d3df8e-e3e1-4065-8736-979a4abaec2c-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\""
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.014311 4910 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ca-certs\") on node \"crc\" DevicePath \"\""
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.014320 4910 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/c1d3df8e-e3e1-4065-8736-979a4abaec2c-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.029496 4910 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc"
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.116041 4910 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\""
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.401950 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"c1d3df8e-e3e1-4065-8736-979a4abaec2c","Type":"ContainerDied","Data":"a1caf090cf6ef76baf3ea934750466c57363fb286e96ca3663e8c7865932a214"}
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.402456 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a1caf090cf6ef76baf3ea934750466c57363fb286e96ca3663e8c7865932a214"
Nov 25 22:29:24 crc kubenswrapper[4910]: I1125 22:29:24.402112 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 25 22:29:31 crc kubenswrapper[4910]: I1125 22:29:31.963080 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 25 22:29:31 crc kubenswrapper[4910]: E1125 22:29:31.965052 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerName="registry-server"
Nov 25 22:29:31 crc kubenswrapper[4910]: I1125 22:29:31.965076 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerName="registry-server"
Nov 25 22:29:31 crc kubenswrapper[4910]: E1125 22:29:31.965094 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerName="extract-utilities"
Nov 25 22:29:31 crc kubenswrapper[4910]: I1125 22:29:31.965102 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerName="extract-utilities"
Nov 25 22:29:31 crc kubenswrapper[4910]: E1125 22:29:31.965118 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1d3df8e-e3e1-4065-8736-979a4abaec2c" containerName="tempest-tests-tempest-tests-runner"
Nov 25 22:29:31 crc kubenswrapper[4910]: I1125 22:29:31.965127 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1d3df8e-e3e1-4065-8736-979a4abaec2c" containerName="tempest-tests-tempest-tests-runner"
Nov 25 22:29:31 crc kubenswrapper[4910]: E1125 22:29:31.965153 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerName="extract-content"
Nov 25 22:29:31 crc kubenswrapper[4910]: I1125 22:29:31.965161 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerName="extract-content"
Nov 25 22:29:31 crc kubenswrapper[4910]: I1125 22:29:31.965484 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd3392ac-5439-4adc-8e8c-1378d37225f3" containerName="registry-server"
Nov 25 22:29:31 crc kubenswrapper[4910]: I1125 22:29:31.965511 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1d3df8e-e3e1-4065-8736-979a4abaec2c" containerName="tempest-tests-tempest-tests-runner"
Nov 25 22:29:31 crc kubenswrapper[4910]: I1125 22:29:31.966734 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 22:29:31 crc kubenswrapper[4910]: I1125 22:29:31.982630 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 25 22:29:31 crc kubenswrapper[4910]: I1125 22:29:31.983220 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-d4cgq"
Nov 25 22:29:32 crc kubenswrapper[4910]: I1125 22:29:32.010615 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqr6j\" (UniqueName: \"kubernetes.io/projected/2fc860e3-9496-49ee-8083-468eb806013d-kube-api-access-tqr6j\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2fc860e3-9496-49ee-8083-468eb806013d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 22:29:32 crc kubenswrapper[4910]: I1125 22:29:32.011123 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2fc860e3-9496-49ee-8083-468eb806013d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 22:29:32 crc kubenswrapper[4910]: I1125 22:29:32.113493 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqr6j\" (UniqueName: \"kubernetes.io/projected/2fc860e3-9496-49ee-8083-468eb806013d-kube-api-access-tqr6j\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2fc860e3-9496-49ee-8083-468eb806013d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 22:29:32 crc kubenswrapper[4910]: I1125 22:29:32.113600 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2fc860e3-9496-49ee-8083-468eb806013d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 22:29:32 crc kubenswrapper[4910]: I1125 22:29:32.114333 4910 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2fc860e3-9496-49ee-8083-468eb806013d\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 22:29:32 crc kubenswrapper[4910]: I1125 22:29:32.149078 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqr6j\" (UniqueName: \"kubernetes.io/projected/2fc860e3-9496-49ee-8083-468eb806013d-kube-api-access-tqr6j\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2fc860e3-9496-49ee-8083-468eb806013d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 22:29:32 crc kubenswrapper[4910]: I1125 22:29:32.156626 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"2fc860e3-9496-49ee-8083-468eb806013d\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 22:29:32 crc kubenswrapper[4910]: I1125 22:29:32.325597 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"
Nov 25 22:29:32 crc kubenswrapper[4910]: I1125 22:29:32.889267 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"]
Nov 25 22:29:32 crc kubenswrapper[4910]: W1125 22:29:32.895294 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2fc860e3_9496_49ee_8083_468eb806013d.slice/crio-ffd6d7853a75eeece8343fcf71d459c894c47953330356be39a628f785dbc514 WatchSource:0}: Error finding container ffd6d7853a75eeece8343fcf71d459c894c47953330356be39a628f785dbc514: Status 404 returned error can't find the container with id ffd6d7853a75eeece8343fcf71d459c894c47953330356be39a628f785dbc514
Nov 25 22:29:33 crc kubenswrapper[4910]: I1125 22:29:33.528003 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"2fc860e3-9496-49ee-8083-468eb806013d","Type":"ContainerStarted","Data":"ffd6d7853a75eeece8343fcf71d459c894c47953330356be39a628f785dbc514"}
Nov 25 22:29:34 crc kubenswrapper[4910]: I1125 22:29:34.540220 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"2fc860e3-9496-49ee-8083-468eb806013d","Type":"ContainerStarted","Data":"dc230a238f09194ebedf1ba7fb0fff6d1cca1361241fdce81afd9492feafecda"}
Nov 25 22:29:34 crc kubenswrapper[4910]: I1125 22:29:34.572545 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.64103147 podStartE2EDuration="3.572519551s" podCreationTimestamp="2025-11-25 22:29:31 +0000 UTC" firstStartedPulling="2025-11-25 22:29:32.898066654 +0000 UTC m=+3528.360542986" lastFinishedPulling="2025-11-25 22:29:33.829554715 +0000 UTC m=+3529.292031067" observedRunningTime="2025-11-25 22:29:34.558699253 +0000 UTC m=+3530.021175605" watchObservedRunningTime="2025-11-25 22:29:34.572519551 +0000 UTC m=+3530.034995883"
Nov 25 22:29:53 crc kubenswrapper[4910]: I1125 22:29:53.099452 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 22:29:53 crc kubenswrapper[4910]: I1125 22:29:53.100462 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 22:29:53 crc kubenswrapper[4910]: I1125 22:29:53.100526 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t"
Nov 25 22:29:53 crc kubenswrapper[4910]: I1125 22:29:53.101966 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 22:29:53 crc kubenswrapper[4910]: I1125 22:29:53.102039 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" gracePeriod=600
Nov 25 22:29:53 crc kubenswrapper[4910]: E1125 22:29:53.241424 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:29:53 crc kubenswrapper[4910]: I1125 22:29:53.800455 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" exitCode=0
Nov 25 22:29:53 crc kubenswrapper[4910]: I1125 22:29:53.800525 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12"}
Nov 25 22:29:53 crc kubenswrapper[4910]: I1125 22:29:53.800626 4910 scope.go:117] "RemoveContainer" containerID="d7555b1701d6f687e8b7dd04e3ebbef12d03cad5c55fe5bf701637ff04e042d9"
Nov 25 22:29:53 crc kubenswrapper[4910]: I1125 22:29:53.802265 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12"
Nov 25 22:29:53 crc kubenswrapper[4910]: E1125 22:29:53.803426 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:29:56 crc kubenswrapper[4910]: I1125 22:29:56.910958 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-lqrwm/must-gather-fnf5s"]
Nov 25 22:29:56 crc kubenswrapper[4910]: I1125 22:29:56.913887 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lqrwm/must-gather-fnf5s"
Nov 25 22:29:56 crc kubenswrapper[4910]: I1125 22:29:56.917702 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-lqrwm"/"openshift-service-ca.crt"
Nov 25 22:29:56 crc kubenswrapper[4910]: I1125 22:29:56.917827 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-lqrwm"/"kube-root-ca.crt"
Nov 25 22:29:56 crc kubenswrapper[4910]: I1125 22:29:56.918068 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-lqrwm"/"default-dockercfg-74b2n"
Nov 25 22:29:56 crc kubenswrapper[4910]: I1125 22:29:56.922648 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-lqrwm/must-gather-fnf5s"]
Nov 25 22:29:57 crc kubenswrapper[4910]: I1125 22:29:57.094993 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9de7ad82-ff82-47a7-9393-cf5ce90f749e-must-gather-output\") pod \"must-gather-fnf5s\" (UID: \"9de7ad82-ff82-47a7-9393-cf5ce90f749e\") " pod="openshift-must-gather-lqrwm/must-gather-fnf5s"
Nov 25 22:29:57 crc kubenswrapper[4910]: I1125 22:29:57.095230 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbxsv\" (UniqueName: \"kubernetes.io/projected/9de7ad82-ff82-47a7-9393-cf5ce90f749e-kube-api-access-bbxsv\") pod \"must-gather-fnf5s\" (UID: \"9de7ad82-ff82-47a7-9393-cf5ce90f749e\") " pod="openshift-must-gather-lqrwm/must-gather-fnf5s"
Nov 25 22:29:57 crc kubenswrapper[4910]: I1125 22:29:57.197450 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9de7ad82-ff82-47a7-9393-cf5ce90f749e-must-gather-output\") pod \"must-gather-fnf5s\" (UID: \"9de7ad82-ff82-47a7-9393-cf5ce90f749e\") " pod="openshift-must-gather-lqrwm/must-gather-fnf5s"
Nov 25 22:29:57 crc kubenswrapper[4910]: I1125 22:29:57.197531 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbxsv\" (UniqueName: \"kubernetes.io/projected/9de7ad82-ff82-47a7-9393-cf5ce90f749e-kube-api-access-bbxsv\") pod \"must-gather-fnf5s\" (UID: \"9de7ad82-ff82-47a7-9393-cf5ce90f749e\") " pod="openshift-must-gather-lqrwm/must-gather-fnf5s"
Nov 25 22:29:57 crc kubenswrapper[4910]: I1125 22:29:57.197906 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9de7ad82-ff82-47a7-9393-cf5ce90f749e-must-gather-output\") pod \"must-gather-fnf5s\" (UID: \"9de7ad82-ff82-47a7-9393-cf5ce90f749e\") " pod="openshift-must-gather-lqrwm/must-gather-fnf5s"
Nov 25 22:29:57 crc kubenswrapper[4910]: I1125 22:29:57.226164 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbxsv\" (UniqueName: \"kubernetes.io/projected/9de7ad82-ff82-47a7-9393-cf5ce90f749e-kube-api-access-bbxsv\") pod \"must-gather-fnf5s\" (UID: \"9de7ad82-ff82-47a7-9393-cf5ce90f749e\") " pod="openshift-must-gather-lqrwm/must-gather-fnf5s"
Nov 25 22:29:57 crc kubenswrapper[4910]: I1125 22:29:57.237029 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lqrwm/must-gather-fnf5s"
Nov 25 22:29:57 crc kubenswrapper[4910]: W1125 22:29:57.810540 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9de7ad82_ff82_47a7_9393_cf5ce90f749e.slice/crio-278f9b308ffa7456d3f04397107629a95520161bf649dc6f485b14f8412ce7ef WatchSource:0}: Error finding container 278f9b308ffa7456d3f04397107629a95520161bf649dc6f485b14f8412ce7ef: Status 404 returned error can't find the container with id 278f9b308ffa7456d3f04397107629a95520161bf649dc6f485b14f8412ce7ef
Nov 25 22:29:57 crc kubenswrapper[4910]: I1125 22:29:57.823111 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-lqrwm/must-gather-fnf5s"]
Nov 25 22:29:57 crc kubenswrapper[4910]: I1125 22:29:57.860995 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lqrwm/must-gather-fnf5s" event={"ID":"9de7ad82-ff82-47a7-9393-cf5ce90f749e","Type":"ContainerStarted","Data":"278f9b308ffa7456d3f04397107629a95520161bf649dc6f485b14f8412ce7ef"}
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.179686 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"]
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.182070 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.185536 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.185744 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.193898 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"]
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.273406 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fw5dv\" (UniqueName: \"kubernetes.io/projected/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-kube-api-access-fw5dv\") pod \"collect-profiles-29401830-rxjqw\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.273877 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-config-volume\") pod \"collect-profiles-29401830-rxjqw\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.273948 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-secret-volume\") pod \"collect-profiles-29401830-rxjqw\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.378111 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-config-volume\") pod \"collect-profiles-29401830-rxjqw\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.378165 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-secret-volume\") pod \"collect-profiles-29401830-rxjqw\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.378339 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fw5dv\" (UniqueName: \"kubernetes.io/projected/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-kube-api-access-fw5dv\") pod \"collect-profiles-29401830-rxjqw\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.381079 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-config-volume\") pod \"collect-profiles-29401830-rxjqw\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.390987 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-secret-volume\") pod \"collect-profiles-29401830-rxjqw\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.399125 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fw5dv\" (UniqueName: \"kubernetes.io/projected/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-kube-api-access-fw5dv\") pod \"collect-profiles-29401830-rxjqw\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:00 crc kubenswrapper[4910]: I1125 22:30:00.519076 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:01 crc kubenswrapper[4910]: I1125 22:30:01.023704 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"]
Nov 25 22:30:02 crc kubenswrapper[4910]: W1125 22:30:02.980369 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd2014a09_6ad9_4a6b_a604_63ccc5d731a2.slice/crio-f92d0f8215197252fc40f1ec0b7c6e9854d772c664f77f05b714e3516390d5a9 WatchSource:0}: Error finding container f92d0f8215197252fc40f1ec0b7c6e9854d772c664f77f05b714e3516390d5a9: Status 404 returned error can't find the container with id f92d0f8215197252fc40f1ec0b7c6e9854d772c664f77f05b714e3516390d5a9
Nov 25 22:30:03 crc kubenswrapper[4910]: I1125 22:30:03.941395 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lqrwm/must-gather-fnf5s" event={"ID":"9de7ad82-ff82-47a7-9393-cf5ce90f749e","Type":"ContainerStarted","Data":"6efac0be1c740c8bdfcef85c95345ae2cbbfd6cbc1a059efa126f7a925939951"}
Nov 25 22:30:03 crc kubenswrapper[4910]: I1125 22:30:03.942879 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lqrwm/must-gather-fnf5s" event={"ID":"9de7ad82-ff82-47a7-9393-cf5ce90f749e","Type":"ContainerStarted","Data":"3900b6394b9685f236a3b034b29aa8aa14b3f1920d76304c688685f5d3915607"}
Nov 25 22:30:03 crc kubenswrapper[4910]: I1125 22:30:03.944904 4910 generic.go:334] "Generic (PLEG): container finished" podID="d2014a09-6ad9-4a6b-a604-63ccc5d731a2" containerID="96c61c3d6a85ced1277e8e75b0e517e20b4f5c98e18fe57bf8f1f41950e7b3b8" exitCode=0
Nov 25 22:30:03 crc kubenswrapper[4910]: I1125 22:30:03.944987 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw" event={"ID":"d2014a09-6ad9-4a6b-a604-63ccc5d731a2","Type":"ContainerDied","Data":"96c61c3d6a85ced1277e8e75b0e517e20b4f5c98e18fe57bf8f1f41950e7b3b8"}
Nov 25 22:30:03 crc kubenswrapper[4910]: I1125 22:30:03.945036 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw" event={"ID":"d2014a09-6ad9-4a6b-a604-63ccc5d731a2","Type":"ContainerStarted","Data":"f92d0f8215197252fc40f1ec0b7c6e9854d772c664f77f05b714e3516390d5a9"}
Nov 25 22:30:04 crc kubenswrapper[4910]: I1125 22:30:04.006470 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-lqrwm/must-gather-fnf5s" podStartSLOduration=2.766046385 podStartE2EDuration="8.00643996s" podCreationTimestamp="2025-11-25 22:29:56 +0000 UTC" firstStartedPulling="2025-11-25 22:29:57.816487833 +0000 UTC m=+3553.278964155" lastFinishedPulling="2025-11-25 22:30:03.056881398 +0000 UTC m=+3558.519357730" observedRunningTime="2025-11-25 22:30:03.968445438 +0000 UTC m=+3559.430921760" watchObservedRunningTime="2025-11-25 22:30:04.00643996 +0000 UTC m=+3559.468916292"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.046233 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-d27xp"]
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.049003 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.097257 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d27xp"]
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.193886 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-catalog-content\") pod \"community-operators-d27xp\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") " pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.193952 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5lcf\" (UniqueName: \"kubernetes.io/projected/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-kube-api-access-v5lcf\") pod \"community-operators-d27xp\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") " pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.194003 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-utilities\") pod \"community-operators-d27xp\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") " pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.296011 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-catalog-content\") pod \"community-operators-d27xp\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") " pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.296072 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5lcf\" (UniqueName: \"kubernetes.io/projected/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-kube-api-access-v5lcf\") pod \"community-operators-d27xp\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") " pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.296124 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-utilities\") pod \"community-operators-d27xp\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") " pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.296701 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-utilities\") pod \"community-operators-d27xp\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") " pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.297142 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-catalog-content\") pod \"community-operators-d27xp\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") " pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.325041 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5lcf\" (UniqueName: \"kubernetes.io/projected/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-kube-api-access-v5lcf\") pod \"community-operators-d27xp\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") " pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.408230 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.421322 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.516020 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-config-volume\") pod \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") "
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.516236 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-secret-volume\") pod \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") "
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.516438 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fw5dv\" (UniqueName: \"kubernetes.io/projected/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-kube-api-access-fw5dv\") pod \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\" (UID: \"d2014a09-6ad9-4a6b-a604-63ccc5d731a2\") "
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.517935 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-config-volume" (OuterVolumeSpecName: "config-volume") pod "d2014a09-6ad9-4a6b-a604-63ccc5d731a2" (UID: "d2014a09-6ad9-4a6b-a604-63ccc5d731a2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.565722 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-kube-api-access-fw5dv" (OuterVolumeSpecName: "kube-api-access-fw5dv") pod "d2014a09-6ad9-4a6b-a604-63ccc5d731a2" (UID: "d2014a09-6ad9-4a6b-a604-63ccc5d731a2"). InnerVolumeSpecName "kube-api-access-fw5dv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.565860 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d2014a09-6ad9-4a6b-a604-63ccc5d731a2" (UID: "d2014a09-6ad9-4a6b-a604-63ccc5d731a2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.618137 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.618175 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fw5dv\" (UniqueName: \"kubernetes.io/projected/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-kube-api-access-fw5dv\") on node \"crc\" DevicePath \"\""
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.618186 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d2014a09-6ad9-4a6b-a604-63ccc5d731a2-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.866450 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-d27xp"]
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.969668 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d27xp" event={"ID":"dc649d1b-8f85-4137-9eb9-4d39661ae3eb","Type":"ContainerStarted","Data":"4cbd5b0b06e12fec31ec0ae2bfd8e8c1d60bd71bea2f61c4e1b803e71cf8bac9"}
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.974500 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw"
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.975662 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401830-rxjqw" event={"ID":"d2014a09-6ad9-4a6b-a604-63ccc5d731a2","Type":"ContainerDied","Data":"f92d0f8215197252fc40f1ec0b7c6e9854d772c664f77f05b714e3516390d5a9"}
Nov 25 22:30:05 crc kubenswrapper[4910]: I1125 22:30:05.975719 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f92d0f8215197252fc40f1ec0b7c6e9854d772c664f77f05b714e3516390d5a9"
Nov 25 22:30:06 crc kubenswrapper[4910]: I1125 22:30:06.518095 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj"]
Nov 25 22:30:06 crc kubenswrapper[4910]: I1125 22:30:06.529174 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401785-5vzqj"]
Nov 25 22:30:07 crc kubenswrapper[4910]: I1125 22:30:07.001218 4910 generic.go:334] "Generic (PLEG): container finished" podID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" containerID="247d825dfb78a4cc7835d895e30509dd84b473be213437e6bcd4306c7b2eb0e8" exitCode=0
Nov 25 22:30:07 crc kubenswrapper[4910]: I1125 22:30:07.001355 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d27xp" event={"ID":"dc649d1b-8f85-4137-9eb9-4d39661ae3eb","Type":"ContainerDied","Data":"247d825dfb78a4cc7835d895e30509dd84b473be213437e6bcd4306c7b2eb0e8"}
Nov 25 22:30:07 crc kubenswrapper[4910]: I1125 22:30:07.206032 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12"
Nov 25 22:30:07 crc kubenswrapper[4910]: E1125 22:30:07.206647 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:30:07 crc kubenswrapper[4910]: I1125 22:30:07.218026 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb61b0ae-981a-40d7-b94d-ecdec564363d" path="/var/lib/kubelet/pods/cb61b0ae-981a-40d7-b94d-ecdec564363d/volumes"
Nov 25 22:30:07 crc kubenswrapper[4910]: I1125 22:30:07.799439 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-lqrwm/crc-debug-dvn7k"]
Nov 25 22:30:07 crc kubenswrapper[4910]: E1125 22:30:07.800036 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2014a09-6ad9-4a6b-a604-63ccc5d731a2" containerName="collect-profiles"
Nov 25 22:30:07 crc kubenswrapper[4910]: I1125 22:30:07.800055 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2014a09-6ad9-4a6b-a604-63ccc5d731a2" containerName="collect-profiles"
Nov 25 22:30:07 crc kubenswrapper[4910]: I1125 22:30:07.800280 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2014a09-6ad9-4a6b-a604-63ccc5d731a2" containerName="collect-profiles"
Nov 25 22:30:07 crc kubenswrapper[4910]: I1125 22:30:07.801038 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-dvn7k"
Nov 25 22:30:07 crc kubenswrapper[4910]: I1125 22:30:07.986808 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/eef2ce10-a3a7-4475-9fa5-63614673cf7a-host\") pod \"crc-debug-dvn7k\" (UID: \"eef2ce10-a3a7-4475-9fa5-63614673cf7a\") " pod="openshift-must-gather-lqrwm/crc-debug-dvn7k"
Nov 25 22:30:07 crc kubenswrapper[4910]: I1125 22:30:07.987460 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgmx9\" (UniqueName: \"kubernetes.io/projected/eef2ce10-a3a7-4475-9fa5-63614673cf7a-kube-api-access-kgmx9\") pod \"crc-debug-dvn7k\" (UID: \"eef2ce10-a3a7-4475-9fa5-63614673cf7a\") " pod="openshift-must-gather-lqrwm/crc-debug-dvn7k"
Nov 25 22:30:08 crc kubenswrapper[4910]: I1125 22:30:08.012957 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d27xp" event={"ID":"dc649d1b-8f85-4137-9eb9-4d39661ae3eb","Type":"ContainerStarted","Data":"d18878c6ef69621f5436f7331c9189714bf47381f5dafe843f202cf058838280"}
Nov 25 22:30:08 crc kubenswrapper[4910]: I1125 22:30:08.089356 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/eef2ce10-a3a7-4475-9fa5-63614673cf7a-host\") pod \"crc-debug-dvn7k\" (UID: \"eef2ce10-a3a7-4475-9fa5-63614673cf7a\") " pod="openshift-must-gather-lqrwm/crc-debug-dvn7k"
Nov 25 22:30:08 crc kubenswrapper[4910]: I1125 22:30:08.089485 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgmx9\" (UniqueName: \"kubernetes.io/projected/eef2ce10-a3a7-4475-9fa5-63614673cf7a-kube-api-access-kgmx9\") pod \"crc-debug-dvn7k\" (UID: \"eef2ce10-a3a7-4475-9fa5-63614673cf7a\") " pod="openshift-must-gather-lqrwm/crc-debug-dvn7k"
Nov 25 22:30:08 crc kubenswrapper[4910]: I1125 22:30:08.089544 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/eef2ce10-a3a7-4475-9fa5-63614673cf7a-host\") pod \"crc-debug-dvn7k\" (UID: \"eef2ce10-a3a7-4475-9fa5-63614673cf7a\") " pod="openshift-must-gather-lqrwm/crc-debug-dvn7k"
Nov 25 22:30:08 crc kubenswrapper[4910]: I1125 22:30:08.125742 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kgmx9\" (UniqueName: \"kubernetes.io/projected/eef2ce10-a3a7-4475-9fa5-63614673cf7a-kube-api-access-kgmx9\") pod \"crc-debug-dvn7k\" (UID: \"eef2ce10-a3a7-4475-9fa5-63614673cf7a\") " pod="openshift-must-gather-lqrwm/crc-debug-dvn7k"
Nov 25 22:30:08 crc kubenswrapper[4910]: I1125 22:30:08.149155 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-dvn7k"
Nov 25 22:30:09 crc kubenswrapper[4910]: I1125 22:30:09.025537 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lqrwm/crc-debug-dvn7k" event={"ID":"eef2ce10-a3a7-4475-9fa5-63614673cf7a","Type":"ContainerStarted","Data":"6a3a1581777b2b8c93de6a938eb0b7c32b0c58d187daa19755cd14910b2bb885"}
Nov 25 22:30:09 crc kubenswrapper[4910]: I1125 22:30:09.029839 4910 generic.go:334] "Generic (PLEG): container finished" podID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" containerID="d18878c6ef69621f5436f7331c9189714bf47381f5dafe843f202cf058838280" exitCode=0
Nov 25 22:30:09 crc kubenswrapper[4910]: I1125 22:30:09.029974 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d27xp" event={"ID":"dc649d1b-8f85-4137-9eb9-4d39661ae3eb","Type":"ContainerDied","Data":"d18878c6ef69621f5436f7331c9189714bf47381f5dafe843f202cf058838280"}
Nov 25 22:30:10 crc kubenswrapper[4910]: I1125 22:30:10.046543 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d27xp" event={"ID":"dc649d1b-8f85-4137-9eb9-4d39661ae3eb","Type":"ContainerStarted","Data":"41e86101e77aab0012d9fcf5b41f2ce2a036d8322e00ee557df804a7359f4ccb"}
Nov 25 22:30:10 crc kubenswrapper[4910]: I1125 22:30:10.071617 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-d27xp" podStartSLOduration=2.6270923059999998 podStartE2EDuration="5.071596953s" podCreationTimestamp="2025-11-25 22:30:05 +0000 UTC" firstStartedPulling="2025-11-25 22:30:07.007591989 +0000 UTC m=+3562.470068311" lastFinishedPulling="2025-11-25 22:30:09.452096636 +0000 UTC m=+3564.914572958" observedRunningTime="2025-11-25 22:30:10.068027948 +0000 UTC m=+3565.530504260" watchObservedRunningTime="2025-11-25 22:30:10.071596953 +0000 UTC m=+3565.534073265"
Nov 25 22:30:12 crc kubenswrapper[4910]: I1125 22:30:12.561648 4910 scope.go:117] "RemoveContainer" containerID="456e43156a749002a3a842f5e6dfbf69e50aad12da6f293a187bdbb957815b9d"
Nov 25 22:30:15 crc kubenswrapper[4910]: I1125 22:30:15.422516 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:15 crc kubenswrapper[4910]: I1125 22:30:15.423002 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:15 crc kubenswrapper[4910]: I1125 22:30:15.494052 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:16 crc kubenswrapper[4910]: I1125 22:30:16.175748 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:18 crc kubenswrapper[4910]: I1125 22:30:18.604789 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d27xp"]
Nov 25 22:30:18 crc kubenswrapper[4910]: I1125 22:30:18.606947 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-d27xp" podUID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" containerName="registry-server" containerID="cri-o://41e86101e77aab0012d9fcf5b41f2ce2a036d8322e00ee557df804a7359f4ccb" gracePeriod=2
Nov 25 22:30:19 crc kubenswrapper[4910]: I1125 22:30:19.151411 4910 generic.go:334] "Generic (PLEG): container finished" podID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" containerID="41e86101e77aab0012d9fcf5b41f2ce2a036d8322e00ee557df804a7359f4ccb" exitCode=0
Nov 25 22:30:19 crc kubenswrapper[4910]: I1125 22:30:19.151467 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d27xp" event={"ID":"dc649d1b-8f85-4137-9eb9-4d39661ae3eb","Type":"ContainerDied","Data":"41e86101e77aab0012d9fcf5b41f2ce2a036d8322e00ee557df804a7359f4ccb"}
Nov 25 22:30:20 crc kubenswrapper[4910]: I1125 22:30:20.691273 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:20 crc kubenswrapper[4910]: I1125 22:30:20.718780 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-catalog-content\") pod \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") "
Nov 25 22:30:20 crc kubenswrapper[4910]: I1125 22:30:20.719552 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-utilities\") pod \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") "
Nov 25 22:30:20 crc kubenswrapper[4910]: I1125 22:30:20.720205 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-utilities" (OuterVolumeSpecName: "utilities") pod "dc649d1b-8f85-4137-9eb9-4d39661ae3eb" (UID: "dc649d1b-8f85-4137-9eb9-4d39661ae3eb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 22:30:20 crc kubenswrapper[4910]: I1125 22:30:20.720356 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v5lcf\" (UniqueName: \"kubernetes.io/projected/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-kube-api-access-v5lcf\") pod \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\" (UID: \"dc649d1b-8f85-4137-9eb9-4d39661ae3eb\") "
Nov 25 22:30:20 crc kubenswrapper[4910]: I1125 22:30:20.722299 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 22:30:20 crc kubenswrapper[4910]: I1125 22:30:20.726545 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-kube-api-access-v5lcf" (OuterVolumeSpecName: "kube-api-access-v5lcf") pod "dc649d1b-8f85-4137-9eb9-4d39661ae3eb" (UID: "dc649d1b-8f85-4137-9eb9-4d39661ae3eb"). InnerVolumeSpecName "kube-api-access-v5lcf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 22:30:20 crc kubenswrapper[4910]: I1125 22:30:20.784373 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dc649d1b-8f85-4137-9eb9-4d39661ae3eb" (UID: "dc649d1b-8f85-4137-9eb9-4d39661ae3eb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 22:30:20 crc kubenswrapper[4910]: I1125 22:30:20.824753 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v5lcf\" (UniqueName: \"kubernetes.io/projected/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-kube-api-access-v5lcf\") on node \"crc\" DevicePath \"\""
Nov 25 22:30:20 crc kubenswrapper[4910]: I1125 22:30:20.824801 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc649d1b-8f85-4137-9eb9-4d39661ae3eb-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 22:30:21 crc kubenswrapper[4910]: I1125 22:30:21.173217 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lqrwm/crc-debug-dvn7k" event={"ID":"eef2ce10-a3a7-4475-9fa5-63614673cf7a","Type":"ContainerStarted","Data":"1dbe301091e89d355a7757858dfbaa82c96bd8bf45f405fd338f64adc9c41302"}
Nov 25 22:30:21 crc kubenswrapper[4910]: I1125 22:30:21.176664 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-d27xp" event={"ID":"dc649d1b-8f85-4137-9eb9-4d39661ae3eb","Type":"ContainerDied","Data":"4cbd5b0b06e12fec31ec0ae2bfd8e8c1d60bd71bea2f61c4e1b803e71cf8bac9"}
Nov 25 22:30:21 crc kubenswrapper[4910]: I1125 22:30:21.176718 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-d27xp"
Nov 25 22:30:21 crc kubenswrapper[4910]: I1125 22:30:21.176835 4910 scope.go:117] "RemoveContainer" containerID="41e86101e77aab0012d9fcf5b41f2ce2a036d8322e00ee557df804a7359f4ccb"
Nov 25 22:30:21 crc kubenswrapper[4910]: I1125 22:30:21.196898 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-lqrwm/crc-debug-dvn7k" podStartSLOduration=2.106952698 podStartE2EDuration="14.196870466s" podCreationTimestamp="2025-11-25 22:30:07 +0000 UTC" firstStartedPulling="2025-11-25 22:30:08.28885636 +0000 UTC m=+3563.751332682" lastFinishedPulling="2025-11-25 22:30:20.378774138 +0000 UTC m=+3575.841250450" observedRunningTime="2025-11-25 22:30:21.193857896 +0000 UTC m=+3576.656334238" watchObservedRunningTime="2025-11-25 22:30:21.196870466 +0000 UTC m=+3576.659346788"
Nov 25 22:30:21 crc kubenswrapper[4910]: I1125 22:30:21.206656 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12"
Nov 25 22:30:21 crc kubenswrapper[4910]: E1125 22:30:21.207058 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:30:21 crc kubenswrapper[4910]: I1125 22:30:21.211631 4910 scope.go:117] "RemoveContainer" containerID="d18878c6ef69621f5436f7331c9189714bf47381f5dafe843f202cf058838280"
Nov 25 22:30:21 crc kubenswrapper[4910]: I1125 22:30:21.238431 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-d27xp"]
Nov 25 22:30:21 crc kubenswrapper[4910]: I1125 22:30:21.243078 4910 scope.go:117] "RemoveContainer" containerID="247d825dfb78a4cc7835d895e30509dd84b473be213437e6bcd4306c7b2eb0e8"
Nov 25 22:30:21 crc kubenswrapper[4910]: I1125 22:30:21.253425 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-d27xp"]
Nov 25 22:30:23 crc kubenswrapper[4910]: I1125 22:30:23.232047 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" path="/var/lib/kubelet/pods/dc649d1b-8f85-4137-9eb9-4d39661ae3eb/volumes"
Nov 25 22:30:35 crc kubenswrapper[4910]: I1125 22:30:35.211393 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12"
Nov 25 22:30:35 crc kubenswrapper[4910]: E1125 22:30:35.212554 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:30:36 crc kubenswrapper[4910]: I1125 22:30:36.957197 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-s2lt8"]
Nov 25 22:30:36 crc kubenswrapper[4910]: E1125 22:30:36.958285 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" containerName="extract-utilities"
Nov 25 22:30:36 crc kubenswrapper[4910]: I1125 22:30:36.958300 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" containerName="extract-utilities"
Nov 25 22:30:36 crc kubenswrapper[4910]: E1125 22:30:36.958315 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" containerName="extract-content"
Nov 25 22:30:36 crc kubenswrapper[4910]: I1125 22:30:36.958321 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" containerName="extract-content"
Nov 25 22:30:36 crc kubenswrapper[4910]: E1125 22:30:36.958373 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" containerName="registry-server"
Nov 25 22:30:36 crc kubenswrapper[4910]: I1125 22:30:36.958380 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" containerName="registry-server"
Nov 25 22:30:36 crc kubenswrapper[4910]: I1125 22:30:36.958608 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc649d1b-8f85-4137-9eb9-4d39661ae3eb" containerName="registry-server"
Nov 25 22:30:36 crc kubenswrapper[4910]: I1125 22:30:36.960034 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:36 crc kubenswrapper[4910]: I1125 22:30:36.978768 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s2lt8"]
Nov 25 22:30:36 crc kubenswrapper[4910]: I1125 22:30:36.996072 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v86bn\" (UniqueName: \"kubernetes.io/projected/e51f49da-a08c-442b-bdb7-ae6039b2ce69-kube-api-access-v86bn\") pod \"redhat-marketplace-s2lt8\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:36 crc kubenswrapper[4910]: I1125 22:30:36.996221 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-catalog-content\") pod \"redhat-marketplace-s2lt8\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:36 crc kubenswrapper[4910]: I1125 22:30:36.996299 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-utilities\") pod \"redhat-marketplace-s2lt8\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:37 crc kubenswrapper[4910]: I1125 22:30:37.098556 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-catalog-content\") pod \"redhat-marketplace-s2lt8\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:37 crc kubenswrapper[4910]: I1125 22:30:37.098652 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-utilities\") pod \"redhat-marketplace-s2lt8\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:37 crc kubenswrapper[4910]: I1125 22:30:37.098823 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v86bn\" (UniqueName: \"kubernetes.io/projected/e51f49da-a08c-442b-bdb7-ae6039b2ce69-kube-api-access-v86bn\") pod \"redhat-marketplace-s2lt8\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:37 crc kubenswrapper[4910]: I1125 22:30:37.099645 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-catalog-content\") pod \"redhat-marketplace-s2lt8\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:37 crc kubenswrapper[4910]: I1125 22:30:37.099661 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-utilities\") pod \"redhat-marketplace-s2lt8\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:37 crc kubenswrapper[4910]: I1125 22:30:37.122167 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v86bn\" (UniqueName: \"kubernetes.io/projected/e51f49da-a08c-442b-bdb7-ae6039b2ce69-kube-api-access-v86bn\") pod \"redhat-marketplace-s2lt8\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:38 crc kubenswrapper[4910]: I1125 22:30:38.422286 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:39 crc kubenswrapper[4910]: I1125 22:30:39.031489 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s2lt8"]
Nov 25 22:30:39 crc kubenswrapper[4910]: I1125 22:30:39.414196 4910 generic.go:334] "Generic (PLEG): container finished" podID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" containerID="8d9bc25301ffe9ae6f3817a30688a8140f3a212a91a83e7ff4f68fe1f5ebb1c5" exitCode=0
Nov 25 22:30:39 crc kubenswrapper[4910]: I1125 22:30:39.414261 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2lt8" event={"ID":"e51f49da-a08c-442b-bdb7-ae6039b2ce69","Type":"ContainerDied","Data":"8d9bc25301ffe9ae6f3817a30688a8140f3a212a91a83e7ff4f68fe1f5ebb1c5"}
Nov 25 22:30:39 crc kubenswrapper[4910]: I1125 22:30:39.414639 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2lt8" event={"ID":"e51f49da-a08c-442b-bdb7-ae6039b2ce69","Type":"ContainerStarted","Data":"cef793407b838c0ed69028d61374fdf34385d6ddaf0c732042b1c94b177469f4"}
Nov 25 22:30:40 crc kubenswrapper[4910]: I1125 22:30:40.429562 4910 generic.go:334] "Generic (PLEG): container finished" podID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" containerID="dfdfb9d95e840b8550ed879a8ee1a35e8ac9fce98ccc09275e9b876a69c4b205" exitCode=0
Nov 25 22:30:40 crc kubenswrapper[4910]: I1125 22:30:40.430513 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2lt8" event={"ID":"e51f49da-a08c-442b-bdb7-ae6039b2ce69","Type":"ContainerDied","Data":"dfdfb9d95e840b8550ed879a8ee1a35e8ac9fce98ccc09275e9b876a69c4b205"}
Nov 25 22:30:42 crc kubenswrapper[4910]: I1125 22:30:42.451719 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2lt8" event={"ID":"e51f49da-a08c-442b-bdb7-ae6039b2ce69","Type":"ContainerStarted","Data":"af21e818136586e4587df34bc0027cda47ab0e364cef8f245b647c97da8a27f1"}
Nov 25 22:30:42 crc kubenswrapper[4910]: I1125 22:30:42.481451 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s2lt8" podStartSLOduration=4.012648822 podStartE2EDuration="6.481418315s" podCreationTimestamp="2025-11-25 22:30:36 +0000 UTC" firstStartedPulling="2025-11-25 22:30:39.41621691 +0000 UTC m=+3594.878693232" lastFinishedPulling="2025-11-25 22:30:41.884986403 +0000 UTC m=+3597.347462725" observedRunningTime="2025-11-25 22:30:42.473898815 +0000 UTC m=+3597.936375197" watchObservedRunningTime="2025-11-25 22:30:42.481418315 +0000 UTC m=+3597.943894637"
Nov 25 22:30:46 crc kubenswrapper[4910]: I1125 22:30:46.204702 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12"
Nov 25 22:30:46 crc kubenswrapper[4910]: E1125 22:30:46.205561 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:30:48 crc kubenswrapper[4910]: I1125 22:30:48.422874 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:48 crc kubenswrapper[4910]: I1125 22:30:48.424145 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:48 crc kubenswrapper[4910]: I1125 22:30:48.485702 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:48 crc kubenswrapper[4910]: I1125 22:30:48.582014 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s2lt8"
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.697293 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5k64x"]
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.699822 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5k64x"
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.721401 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5k64x"]
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.803398 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-utilities\") pod \"certified-operators-5k64x\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " pod="openshift-marketplace/certified-operators-5k64x"
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.803650 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcvph\" (UniqueName: \"kubernetes.io/projected/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-kube-api-access-kcvph\") pod \"certified-operators-5k64x\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " pod="openshift-marketplace/certified-operators-5k64x"
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.804011 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-catalog-content\") pod \"certified-operators-5k64x\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " pod="openshift-marketplace/certified-operators-5k64x"
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.906793 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-utilities\") pod \"certified-operators-5k64x\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " pod="openshift-marketplace/certified-operators-5k64x"
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.906875 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcvph\" (UniqueName: \"kubernetes.io/projected/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-kube-api-access-kcvph\") pod \"certified-operators-5k64x\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " pod="openshift-marketplace/certified-operators-5k64x"
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.906939 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-catalog-content\") pod \"certified-operators-5k64x\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " pod="openshift-marketplace/certified-operators-5k64x"
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.907599 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-catalog-content\") pod \"certified-operators-5k64x\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " pod="openshift-marketplace/certified-operators-5k64x"
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.907801 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-utilities\") pod \"certified-operators-5k64x\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " pod="openshift-marketplace/certified-operators-5k64x"
Nov 25 22:30:50 crc kubenswrapper[4910]: I1125 22:30:50.931055 4910 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"kube-api-access-kcvph\" (UniqueName: \"kubernetes.io/projected/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-kube-api-access-kcvph\") pod \"certified-operators-5k64x\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " pod="openshift-marketplace/certified-operators-5k64x" Nov 25 22:30:51 crc kubenswrapper[4910]: I1125 22:30:51.026807 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5k64x" Nov 25 22:30:51 crc kubenswrapper[4910]: I1125 22:30:51.728086 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5k64x"] Nov 25 22:30:52 crc kubenswrapper[4910]: I1125 22:30:52.585888 4910 generic.go:334] "Generic (PLEG): container finished" podID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" containerID="a2e78cb0e049df998e09300e3a229cfc650b214b19b3869335ebe358fdbb80ae" exitCode=0 Nov 25 22:30:52 crc kubenswrapper[4910]: I1125 22:30:52.585991 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5k64x" event={"ID":"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c","Type":"ContainerDied","Data":"a2e78cb0e049df998e09300e3a229cfc650b214b19b3869335ebe358fdbb80ae"} Nov 25 22:30:52 crc kubenswrapper[4910]: I1125 22:30:52.586304 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5k64x" event={"ID":"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c","Type":"ContainerStarted","Data":"9fc2d52bf47796cd19a3ad0d9ee5b09f5b419712ce0d9e7054e2a7d4b34006c4"} Nov 25 22:30:53 crc kubenswrapper[4910]: I1125 22:30:53.615311 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5k64x" event={"ID":"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c","Type":"ContainerStarted","Data":"75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc"} Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.285360 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s2lt8"] Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.285998 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-s2lt8" podUID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" containerName="registry-server" containerID="cri-o://af21e818136586e4587df34bc0027cda47ab0e364cef8f245b647c97da8a27f1" gracePeriod=2 Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.626801 4910 generic.go:334] "Generic (PLEG): container finished" podID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" containerID="af21e818136586e4587df34bc0027cda47ab0e364cef8f245b647c97da8a27f1" exitCode=0 Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.626925 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2lt8" event={"ID":"e51f49da-a08c-442b-bdb7-ae6039b2ce69","Type":"ContainerDied","Data":"af21e818136586e4587df34bc0027cda47ab0e364cef8f245b647c97da8a27f1"} Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.629451 4910 generic.go:334] "Generic (PLEG): container finished" podID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" containerID="75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc" exitCode=0 Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.629506 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5k64x" 
event={"ID":"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c","Type":"ContainerDied","Data":"75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc"} Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.796217 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s2lt8" Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.915526 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-catalog-content\") pod \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.915592 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v86bn\" (UniqueName: \"kubernetes.io/projected/e51f49da-a08c-442b-bdb7-ae6039b2ce69-kube-api-access-v86bn\") pod \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.915707 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-utilities\") pod \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\" (UID: \"e51f49da-a08c-442b-bdb7-ae6039b2ce69\") " Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.916616 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-utilities" (OuterVolumeSpecName: "utilities") pod "e51f49da-a08c-442b-bdb7-ae6039b2ce69" (UID: "e51f49da-a08c-442b-bdb7-ae6039b2ce69"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.930616 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e51f49da-a08c-442b-bdb7-ae6039b2ce69-kube-api-access-v86bn" (OuterVolumeSpecName: "kube-api-access-v86bn") pod "e51f49da-a08c-442b-bdb7-ae6039b2ce69" (UID: "e51f49da-a08c-442b-bdb7-ae6039b2ce69"). InnerVolumeSpecName "kube-api-access-v86bn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:30:54 crc kubenswrapper[4910]: I1125 22:30:54.947228 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e51f49da-a08c-442b-bdb7-ae6039b2ce69" (UID: "e51f49da-a08c-442b-bdb7-ae6039b2ce69"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.017928 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.017980 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v86bn\" (UniqueName: \"kubernetes.io/projected/e51f49da-a08c-442b-bdb7-ae6039b2ce69-kube-api-access-v86bn\") on node \"crc\" DevicePath \"\"" Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.017999 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e51f49da-a08c-442b-bdb7-ae6039b2ce69-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.642467 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s2lt8" event={"ID":"e51f49da-a08c-442b-bdb7-ae6039b2ce69","Type":"ContainerDied","Data":"cef793407b838c0ed69028d61374fdf34385d6ddaf0c732042b1c94b177469f4"} Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.642991 4910 scope.go:117] "RemoveContainer" containerID="af21e818136586e4587df34bc0027cda47ab0e364cef8f245b647c97da8a27f1" Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.642518 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s2lt8" Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.647786 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5k64x" event={"ID":"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c","Type":"ContainerStarted","Data":"07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a"} Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.674404 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5k64x" podStartSLOduration=2.858309818 podStartE2EDuration="5.674385556s" podCreationTimestamp="2025-11-25 22:30:50 +0000 UTC" firstStartedPulling="2025-11-25 22:30:52.589025343 +0000 UTC m=+3608.051501665" lastFinishedPulling="2025-11-25 22:30:55.405101081 +0000 UTC m=+3610.867577403" observedRunningTime="2025-11-25 22:30:55.667990146 +0000 UTC m=+3611.130466468" watchObservedRunningTime="2025-11-25 22:30:55.674385556 +0000 UTC m=+3611.136861878" Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.675679 4910 scope.go:117] "RemoveContainer" containerID="dfdfb9d95e840b8550ed879a8ee1a35e8ac9fce98ccc09275e9b876a69c4b205" Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.697284 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s2lt8"] Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.704648 4910 scope.go:117] "RemoveContainer" containerID="8d9bc25301ffe9ae6f3817a30688a8140f3a212a91a83e7ff4f68fe1f5ebb1c5" Nov 25 22:30:55 crc kubenswrapper[4910]: I1125 22:30:55.710615 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-s2lt8"] Nov 25 22:30:57 crc kubenswrapper[4910]: I1125 22:30:57.216993 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" path="/var/lib/kubelet/pods/e51f49da-a08c-442b-bdb7-ae6039b2ce69/volumes" Nov 25 22:31:01 crc kubenswrapper[4910]: I1125 22:31:01.027429 4910 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5k64x" Nov 25 22:31:01 crc kubenswrapper[4910]: I1125 22:31:01.028064 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5k64x" Nov 25 22:31:01 crc kubenswrapper[4910]: I1125 22:31:01.081947 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5k64x" Nov 25 22:31:01 crc kubenswrapper[4910]: I1125 22:31:01.207667 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:31:01 crc kubenswrapper[4910]: E1125 22:31:01.208068 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:31:01 crc kubenswrapper[4910]: I1125 22:31:01.766890 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5k64x" Nov 25 22:31:01 crc kubenswrapper[4910]: I1125 22:31:01.845072 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5k64x"] Nov 25 22:31:03 crc kubenswrapper[4910]: I1125 22:31:03.728570 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5k64x" podUID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" containerName="registry-server" containerID="cri-o://07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a" gracePeriod=2 Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.320440 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5k64x" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.442378 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-utilities\") pod \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.442447 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-catalog-content\") pod \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.442566 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcvph\" (UniqueName: \"kubernetes.io/projected/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-kube-api-access-kcvph\") pod \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\" (UID: \"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c\") " Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.443684 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-utilities" (OuterVolumeSpecName: "utilities") pod "ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" (UID: "ee3854b5-a70a-42de-b017-9c1b3f4b6a1c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.466133 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-kube-api-access-kcvph" (OuterVolumeSpecName: "kube-api-access-kcvph") pod "ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" (UID: "ee3854b5-a70a-42de-b017-9c1b3f4b6a1c"). InnerVolumeSpecName "kube-api-access-kcvph". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.504358 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" (UID: "ee3854b5-a70a-42de-b017-9c1b3f4b6a1c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.545405 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.545450 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.545464 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kcvph\" (UniqueName: \"kubernetes.io/projected/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c-kube-api-access-kcvph\") on node \"crc\" DevicePath \"\"" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.742468 4910 generic.go:334] "Generic (PLEG): container finished" podID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" containerID="07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a" exitCode=0 Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.742544 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5k64x" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.742599 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5k64x" event={"ID":"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c","Type":"ContainerDied","Data":"07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a"} Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.744346 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5k64x" event={"ID":"ee3854b5-a70a-42de-b017-9c1b3f4b6a1c","Type":"ContainerDied","Data":"9fc2d52bf47796cd19a3ad0d9ee5b09f5b419712ce0d9e7054e2a7d4b34006c4"} Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.744382 4910 scope.go:117] "RemoveContainer" containerID="07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.787888 4910 scope.go:117] "RemoveContainer" containerID="75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.794172 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5k64x"] Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.805845 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5k64x"] Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.817174 4910 scope.go:117] "RemoveContainer" containerID="a2e78cb0e049df998e09300e3a229cfc650b214b19b3869335ebe358fdbb80ae" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.883648 4910 scope.go:117] "RemoveContainer" containerID="07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a" Nov 25 22:31:04 crc kubenswrapper[4910]: E1125 22:31:04.884322 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a\": container with ID starting with 07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a not found: ID does not exist" containerID="07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.884372 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a"} err="failed to get container status \"07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a\": rpc error: code = NotFound desc = could not find container \"07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a\": container with ID starting with 07dcba05c3b1f78871dc02c76cac3c3b02b07b0dbe2001dd1cd072fe8fefe86a not found: ID does not exist" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.884407 4910 scope.go:117] "RemoveContainer" containerID="75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc" Nov 25 22:31:04 crc kubenswrapper[4910]: E1125 22:31:04.884807 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc\": container with ID starting with 75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc not found: ID does not exist" containerID="75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.884837 4910 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc"} err="failed to get container status \"75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc\": rpc error: code = NotFound desc = could not find container \"75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc\": container with ID starting with 75c687481bd6ee541f29ca6f5c87aea82a5013703d18056f875400ea9dae7fdc not found: ID does not exist" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.884855 4910 scope.go:117] "RemoveContainer" containerID="a2e78cb0e049df998e09300e3a229cfc650b214b19b3869335ebe358fdbb80ae" Nov 25 22:31:04 crc kubenswrapper[4910]: E1125 22:31:04.885709 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2e78cb0e049df998e09300e3a229cfc650b214b19b3869335ebe358fdbb80ae\": container with ID starting with a2e78cb0e049df998e09300e3a229cfc650b214b19b3869335ebe358fdbb80ae not found: ID does not exist" containerID="a2e78cb0e049df998e09300e3a229cfc650b214b19b3869335ebe358fdbb80ae" Nov 25 22:31:04 crc kubenswrapper[4910]: I1125 22:31:04.885736 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2e78cb0e049df998e09300e3a229cfc650b214b19b3869335ebe358fdbb80ae"} err="failed to get container status \"a2e78cb0e049df998e09300e3a229cfc650b214b19b3869335ebe358fdbb80ae\": rpc error: code = NotFound desc = could not find container \"a2e78cb0e049df998e09300e3a229cfc650b214b19b3869335ebe358fdbb80ae\": container with ID starting with a2e78cb0e049df998e09300e3a229cfc650b214b19b3869335ebe358fdbb80ae not found: ID does not exist" Nov 25 22:31:05 crc kubenswrapper[4910]: I1125 22:31:05.226953 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" path="/var/lib/kubelet/pods/ee3854b5-a70a-42de-b017-9c1b3f4b6a1c/volumes" Nov 25 22:31:06 crc kubenswrapper[4910]: I1125 22:31:06.779458 4910 generic.go:334] "Generic (PLEG): container finished" podID="eef2ce10-a3a7-4475-9fa5-63614673cf7a" containerID="1dbe301091e89d355a7757858dfbaa82c96bd8bf45f405fd338f64adc9c41302" exitCode=0 Nov 25 22:31:06 crc kubenswrapper[4910]: I1125 22:31:06.779697 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lqrwm/crc-debug-dvn7k" event={"ID":"eef2ce10-a3a7-4475-9fa5-63614673cf7a","Type":"ContainerDied","Data":"1dbe301091e89d355a7757858dfbaa82c96bd8bf45f405fd338f64adc9c41302"} Nov 25 22:31:07 crc kubenswrapper[4910]: I1125 22:31:07.934729 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-dvn7k" Nov 25 22:31:07 crc kubenswrapper[4910]: I1125 22:31:07.991854 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-lqrwm/crc-debug-dvn7k"] Nov 25 22:31:08 crc kubenswrapper[4910]: I1125 22:31:08.003420 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-lqrwm/crc-debug-dvn7k"] Nov 25 22:31:08 crc kubenswrapper[4910]: I1125 22:31:08.041677 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/eef2ce10-a3a7-4475-9fa5-63614673cf7a-host\") pod \"eef2ce10-a3a7-4475-9fa5-63614673cf7a\" (UID: \"eef2ce10-a3a7-4475-9fa5-63614673cf7a\") " Nov 25 22:31:08 crc kubenswrapper[4910]: I1125 22:31:08.041987 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgmx9\" (UniqueName: \"kubernetes.io/projected/eef2ce10-a3a7-4475-9fa5-63614673cf7a-kube-api-access-kgmx9\") pod \"eef2ce10-a3a7-4475-9fa5-63614673cf7a\" (UID: \"eef2ce10-a3a7-4475-9fa5-63614673cf7a\") " Nov 25 22:31:08 crc kubenswrapper[4910]: I1125 22:31:08.042187 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/eef2ce10-a3a7-4475-9fa5-63614673cf7a-host" (OuterVolumeSpecName: "host") pod "eef2ce10-a3a7-4475-9fa5-63614673cf7a" (UID: "eef2ce10-a3a7-4475-9fa5-63614673cf7a"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 22:31:08 crc kubenswrapper[4910]: I1125 22:31:08.043050 4910 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/eef2ce10-a3a7-4475-9fa5-63614673cf7a-host\") on node \"crc\" DevicePath \"\"" Nov 25 22:31:08 crc kubenswrapper[4910]: I1125 22:31:08.049453 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eef2ce10-a3a7-4475-9fa5-63614673cf7a-kube-api-access-kgmx9" (OuterVolumeSpecName: "kube-api-access-kgmx9") pod "eef2ce10-a3a7-4475-9fa5-63614673cf7a" (UID: "eef2ce10-a3a7-4475-9fa5-63614673cf7a"). InnerVolumeSpecName "kube-api-access-kgmx9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:31:08 crc kubenswrapper[4910]: I1125 22:31:08.145002 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgmx9\" (UniqueName: \"kubernetes.io/projected/eef2ce10-a3a7-4475-9fa5-63614673cf7a-kube-api-access-kgmx9\") on node \"crc\" DevicePath \"\"" Nov 25 22:31:08 crc kubenswrapper[4910]: I1125 22:31:08.811319 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a3a1581777b2b8c93de6a938eb0b7c32b0c58d187daa19755cd14910b2bb885" Nov 25 22:31:08 crc kubenswrapper[4910]: I1125 22:31:08.811624 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-dvn7k" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.228383 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eef2ce10-a3a7-4475-9fa5-63614673cf7a" path="/var/lib/kubelet/pods/eef2ce10-a3a7-4475-9fa5-63614673cf7a/volumes" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.263860 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-lqrwm/crc-debug-6zsh5"] Nov 25 22:31:09 crc kubenswrapper[4910]: E1125 22:31:09.264629 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" containerName="extract-content" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.264664 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" containerName="extract-content" Nov 25 22:31:09 crc kubenswrapper[4910]: E1125 22:31:09.264762 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" containerName="extract-utilities" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.264776 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" containerName="extract-utilities" Nov 25 22:31:09 crc kubenswrapper[4910]: E1125 22:31:09.264812 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eef2ce10-a3a7-4475-9fa5-63614673cf7a" containerName="container-00" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.264823 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="eef2ce10-a3a7-4475-9fa5-63614673cf7a" containerName="container-00" Nov 25 22:31:09 crc kubenswrapper[4910]: E1125 22:31:09.264873 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" containerName="extract-utilities" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.264884 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" containerName="extract-utilities" Nov 25 22:31:09 crc kubenswrapper[4910]: E1125 22:31:09.264909 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" containerName="extract-content" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.264921 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" containerName="extract-content" Nov 25 22:31:09 crc kubenswrapper[4910]: E1125 22:31:09.264936 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" containerName="registry-server" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.264947 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" containerName="registry-server" Nov 25 22:31:09 crc kubenswrapper[4910]: E1125 22:31:09.264964 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" containerName="registry-server" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.264976 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" containerName="registry-server" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.265521 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="e51f49da-a08c-442b-bdb7-ae6039b2ce69" containerName="registry-server" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.265566 4910 
memory_manager.go:354] "RemoveStaleState removing state" podUID="ee3854b5-a70a-42de-b017-9c1b3f4b6a1c" containerName="registry-server" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.265599 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="eef2ce10-a3a7-4475-9fa5-63614673cf7a" containerName="container-00" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.266852 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.379381 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lgqz\" (UniqueName: \"kubernetes.io/projected/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-kube-api-access-8lgqz\") pod \"crc-debug-6zsh5\" (UID: \"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257\") " pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.379469 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-host\") pod \"crc-debug-6zsh5\" (UID: \"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257\") " pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.482900 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lgqz\" (UniqueName: \"kubernetes.io/projected/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-kube-api-access-8lgqz\") pod \"crc-debug-6zsh5\" (UID: \"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257\") " pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.482998 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-host\") pod \"crc-debug-6zsh5\" (UID: \"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257\") " pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.483295 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-host\") pod \"crc-debug-6zsh5\" (UID: \"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257\") " pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.520810 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lgqz\" (UniqueName: \"kubernetes.io/projected/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-kube-api-access-8lgqz\") pod \"crc-debug-6zsh5\" (UID: \"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257\") " pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.588304 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" Nov 25 22:31:09 crc kubenswrapper[4910]: I1125 22:31:09.826990 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" event={"ID":"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257","Type":"ContainerStarted","Data":"8559c493d534a3bc2fa2f82902238d0ea270875779e95f3713b7af0b3d2263b6"} Nov 25 22:31:10 crc kubenswrapper[4910]: I1125 22:31:10.844410 4910 generic.go:334] "Generic (PLEG): container finished" podID="5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257" containerID="a0fe97f40192cfa5401b2f79e212022ae73459b3f984e276f4adca17e93d0c50" exitCode=0 Nov 25 22:31:10 crc kubenswrapper[4910]: I1125 22:31:10.844476 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" event={"ID":"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257","Type":"ContainerDied","Data":"a0fe97f40192cfa5401b2f79e212022ae73459b3f984e276f4adca17e93d0c50"} Nov 25 22:31:11 crc kubenswrapper[4910]: I1125 22:31:11.592759 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-lqrwm/crc-debug-6zsh5"] Nov 25 22:31:11 crc kubenswrapper[4910]: I1125 22:31:11.601357 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-lqrwm/crc-debug-6zsh5"] Nov 25 22:31:11 crc kubenswrapper[4910]: I1125 22:31:11.977612 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.047181 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-host\") pod \"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257\" (UID: \"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257\") " Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.047733 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8lgqz\" (UniqueName: \"kubernetes.io/projected/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-kube-api-access-8lgqz\") pod \"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257\" (UID: \"5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257\") " Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.050108 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-host" (OuterVolumeSpecName: "host") pod "5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257" (UID: "5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.056466 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-kube-api-access-8lgqz" (OuterVolumeSpecName: "kube-api-access-8lgqz") pod "5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257" (UID: "5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257"). InnerVolumeSpecName "kube-api-access-8lgqz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.151023 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8lgqz\" (UniqueName: \"kubernetes.io/projected/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-kube-api-access-8lgqz\") on node \"crc\" DevicePath \"\"" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.151072 4910 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257-host\") on node \"crc\" DevicePath \"\"" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.846929 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-lqrwm/crc-debug-rctnl"] Nov 25 22:31:12 crc kubenswrapper[4910]: E1125 22:31:12.847633 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257" containerName="container-00" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.847662 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257" containerName="container-00" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.848097 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257" containerName="container-00" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.849267 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-rctnl" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.872993 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8559c493d534a3bc2fa2f82902238d0ea270875779e95f3713b7af0b3d2263b6" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.873064 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-6zsh5" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.973436 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3d5c098b-e243-4068-9e49-bc557af194f6-host\") pod \"crc-debug-rctnl\" (UID: \"3d5c098b-e243-4068-9e49-bc557af194f6\") " pod="openshift-must-gather-lqrwm/crc-debug-rctnl" Nov 25 22:31:12 crc kubenswrapper[4910]: I1125 22:31:12.973750 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nt5zs\" (UniqueName: \"kubernetes.io/projected/3d5c098b-e243-4068-9e49-bc557af194f6-kube-api-access-nt5zs\") pod \"crc-debug-rctnl\" (UID: \"3d5c098b-e243-4068-9e49-bc557af194f6\") " pod="openshift-must-gather-lqrwm/crc-debug-rctnl" Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.077316 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3d5c098b-e243-4068-9e49-bc557af194f6-host\") pod \"crc-debug-rctnl\" (UID: \"3d5c098b-e243-4068-9e49-bc557af194f6\") " pod="openshift-must-gather-lqrwm/crc-debug-rctnl" Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.077661 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3d5c098b-e243-4068-9e49-bc557af194f6-host\") pod \"crc-debug-rctnl\" (UID: \"3d5c098b-e243-4068-9e49-bc557af194f6\") " pod="openshift-must-gather-lqrwm/crc-debug-rctnl" Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.078156 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nt5zs\" (UniqueName: \"kubernetes.io/projected/3d5c098b-e243-4068-9e49-bc557af194f6-kube-api-access-nt5zs\") pod \"crc-debug-rctnl\" (UID: \"3d5c098b-e243-4068-9e49-bc557af194f6\") " pod="openshift-must-gather-lqrwm/crc-debug-rctnl" Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.113212 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nt5zs\" (UniqueName: \"kubernetes.io/projected/3d5c098b-e243-4068-9e49-bc557af194f6-kube-api-access-nt5zs\") pod \"crc-debug-rctnl\" (UID: \"3d5c098b-e243-4068-9e49-bc557af194f6\") " pod="openshift-must-gather-lqrwm/crc-debug-rctnl" Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.178631 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-rctnl" Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.205986 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:31:13 crc kubenswrapper[4910]: E1125 22:31:13.206487 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.223935 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257" path="/var/lib/kubelet/pods/5bf3b01e-e8a4-4891-a1fe-3ab4ed3db257/volumes" Nov 25 22:31:13 crc kubenswrapper[4910]: W1125 22:31:13.230084 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d5c098b_e243_4068_9e49_bc557af194f6.slice/crio-0d878fb2eb57632ff88754ed1edfa67e11e1411c356bcec70ce7b1846161088d WatchSource:0}: Error finding container 0d878fb2eb57632ff88754ed1edfa67e11e1411c356bcec70ce7b1846161088d: Status 404 returned error can't find the container with id 0d878fb2eb57632ff88754ed1edfa67e11e1411c356bcec70ce7b1846161088d Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.889968 4910 generic.go:334] "Generic (PLEG): container finished" podID="3d5c098b-e243-4068-9e49-bc557af194f6" containerID="de87ab473314b2eac663a29a7b3dfe42634e73445a6aa107015662cadb9b0c92" exitCode=0 Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.890079 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lqrwm/crc-debug-rctnl" event={"ID":"3d5c098b-e243-4068-9e49-bc557af194f6","Type":"ContainerDied","Data":"de87ab473314b2eac663a29a7b3dfe42634e73445a6aa107015662cadb9b0c92"} Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.890714 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lqrwm/crc-debug-rctnl" event={"ID":"3d5c098b-e243-4068-9e49-bc557af194f6","Type":"ContainerStarted","Data":"0d878fb2eb57632ff88754ed1edfa67e11e1411c356bcec70ce7b1846161088d"} Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.948452 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-lqrwm/crc-debug-rctnl"] Nov 25 22:31:13 crc kubenswrapper[4910]: I1125 22:31:13.975452 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-lqrwm/crc-debug-rctnl"] Nov 25 22:31:15 crc kubenswrapper[4910]: I1125 22:31:15.047237 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-rctnl" Nov 25 22:31:15 crc kubenswrapper[4910]: I1125 22:31:15.122312 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nt5zs\" (UniqueName: \"kubernetes.io/projected/3d5c098b-e243-4068-9e49-bc557af194f6-kube-api-access-nt5zs\") pod \"3d5c098b-e243-4068-9e49-bc557af194f6\" (UID: \"3d5c098b-e243-4068-9e49-bc557af194f6\") " Nov 25 22:31:15 crc kubenswrapper[4910]: I1125 22:31:15.122811 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3d5c098b-e243-4068-9e49-bc557af194f6-host\") pod \"3d5c098b-e243-4068-9e49-bc557af194f6\" (UID: \"3d5c098b-e243-4068-9e49-bc557af194f6\") " Nov 25 22:31:15 crc kubenswrapper[4910]: I1125 22:31:15.123014 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3d5c098b-e243-4068-9e49-bc557af194f6-host" (OuterVolumeSpecName: "host") pod "3d5c098b-e243-4068-9e49-bc557af194f6" (UID: "3d5c098b-e243-4068-9e49-bc557af194f6"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 22:31:15 crc kubenswrapper[4910]: I1125 22:31:15.124221 4910 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3d5c098b-e243-4068-9e49-bc557af194f6-host\") on node \"crc\" DevicePath \"\"" Nov 25 22:31:15 crc kubenswrapper[4910]: I1125 22:31:15.140479 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d5c098b-e243-4068-9e49-bc557af194f6-kube-api-access-nt5zs" (OuterVolumeSpecName: "kube-api-access-nt5zs") pod "3d5c098b-e243-4068-9e49-bc557af194f6" (UID: "3d5c098b-e243-4068-9e49-bc557af194f6"). InnerVolumeSpecName "kube-api-access-nt5zs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:31:15 crc kubenswrapper[4910]: I1125 22:31:15.217485 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d5c098b-e243-4068-9e49-bc557af194f6" path="/var/lib/kubelet/pods/3d5c098b-e243-4068-9e49-bc557af194f6/volumes" Nov 25 22:31:15 crc kubenswrapper[4910]: I1125 22:31:15.227133 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nt5zs\" (UniqueName: \"kubernetes.io/projected/3d5c098b-e243-4068-9e49-bc557af194f6-kube-api-access-nt5zs\") on node \"crc\" DevicePath \"\"" Nov 25 22:31:15 crc kubenswrapper[4910]: I1125 22:31:15.916224 4910 scope.go:117] "RemoveContainer" containerID="de87ab473314b2eac663a29a7b3dfe42634e73445a6aa107015662cadb9b0c92" Nov 25 22:31:15 crc kubenswrapper[4910]: I1125 22:31:15.916236 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lqrwm/crc-debug-rctnl" Nov 25 22:31:25 crc kubenswrapper[4910]: I1125 22:31:25.221232 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:31:25 crc kubenswrapper[4910]: E1125 22:31:25.222605 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:31:30 crc kubenswrapper[4910]: I1125 22:31:30.987773 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7ccccf649d-9sm5c_fe39bdcb-17f5-455e-89af-d161d0d651fc/barbican-api/0.log" Nov 25 22:31:31 crc kubenswrapper[4910]: I1125 22:31:31.206445 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7ccccf649d-9sm5c_fe39bdcb-17f5-455e-89af-d161d0d651fc/barbican-api-log/0.log" Nov 25 22:31:31 crc kubenswrapper[4910]: I1125 22:31:31.227925 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-865b64f5bb-fdgzg_7193b97a-2be1-4f8f-9e84-abb09908f78c/barbican-keystone-listener/0.log" Nov 25 22:31:31 crc kubenswrapper[4910]: I1125 22:31:31.284330 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-865b64f5bb-fdgzg_7193b97a-2be1-4f8f-9e84-abb09908f78c/barbican-keystone-listener-log/0.log" Nov 25 22:31:31 crc kubenswrapper[4910]: I1125 22:31:31.401524 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7fdcb7f4c9-g5zfq_463bc99a-ad40-4df5-9b99-d10d0af67cea/barbican-worker/0.log" Nov 25 22:31:31 crc kubenswrapper[4910]: I1125 22:31:31.458016 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7fdcb7f4c9-g5zfq_463bc99a-ad40-4df5-9b99-d10d0af67cea/barbican-worker-log/0.log" Nov 25 22:31:31 crc kubenswrapper[4910]: I1125 22:31:31.663636 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh_6fbaf31f-bfe9-4f0a-a064-75d015480249/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:31 crc kubenswrapper[4910]: I1125 22:31:31.995279 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f8fbe2b4-66f6-440d-8cdd-04534f6069ad/ceilometer-central-agent/0.log" Nov 25 22:31:32 crc kubenswrapper[4910]: I1125 22:31:32.076529 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f8fbe2b4-66f6-440d-8cdd-04534f6069ad/ceilometer-notification-agent/0.log" Nov 25 22:31:32 crc kubenswrapper[4910]: I1125 22:31:32.125295 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f8fbe2b4-66f6-440d-8cdd-04534f6069ad/proxy-httpd/0.log" Nov 25 22:31:32 crc kubenswrapper[4910]: I1125 22:31:32.213420 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f8fbe2b4-66f6-440d-8cdd-04534f6069ad/sg-core/0.log" Nov 25 22:31:32 crc kubenswrapper[4910]: I1125 22:31:32.347138 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_24fe78bf-5d43-4896-b226-8d33a8856a13/cinder-api/0.log" Nov 25 22:31:32 crc kubenswrapper[4910]: I1125 
22:31:32.423859 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_24fe78bf-5d43-4896-b226-8d33a8856a13/cinder-api-log/0.log" Nov 25 22:31:32 crc kubenswrapper[4910]: I1125 22:31:32.609730 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_f9dafbee-be84-4df3-a1d1-6ff36015ec46/cinder-scheduler/0.log" Nov 25 22:31:32 crc kubenswrapper[4910]: I1125 22:31:32.657957 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_f9dafbee-be84-4df3-a1d1-6ff36015ec46/probe/0.log" Nov 25 22:31:32 crc kubenswrapper[4910]: I1125 22:31:32.814133 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz_dc112a56-de9c-47b3-8ca9-c0225469f85c/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:32 crc kubenswrapper[4910]: I1125 22:31:32.937314 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j_6552e880-8d31-43fc-9fee-d2e33c2ca987/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:33 crc kubenswrapper[4910]: I1125 22:31:33.042808 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-9qdqp_fe3aae4c-2f2b-42be-b179-105323fa0957/init/0.log" Nov 25 22:31:33 crc kubenswrapper[4910]: I1125 22:31:33.259026 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-9qdqp_fe3aae4c-2f2b-42be-b179-105323fa0957/init/0.log" Nov 25 22:31:33 crc kubenswrapper[4910]: I1125 22:31:33.290521 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-9qdqp_fe3aae4c-2f2b-42be-b179-105323fa0957/dnsmasq-dns/0.log" Nov 25 22:31:33 crc kubenswrapper[4910]: I1125 22:31:33.340464 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-24hvs_a6f639c3-729d-4c6a-9e97-afb151569af5/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:33 crc kubenswrapper[4910]: I1125 22:31:33.555165 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_5b3e95ca-7b13-4baf-98f1-465aa3b31a2c/glance-httpd/0.log" Nov 25 22:31:33 crc kubenswrapper[4910]: I1125 22:31:33.634404 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_5b3e95ca-7b13-4baf-98f1-465aa3b31a2c/glance-log/0.log" Nov 25 22:31:33 crc kubenswrapper[4910]: I1125 22:31:33.814582 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_b9542bd3-e5b7-44e2-84bb-11b34d1fc44b/glance-httpd/0.log" Nov 25 22:31:33 crc kubenswrapper[4910]: I1125 22:31:33.845008 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_b9542bd3-e5b7-44e2-84bb-11b34d1fc44b/glance-log/0.log" Nov 25 22:31:34 crc kubenswrapper[4910]: I1125 22:31:34.019883 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-657976db8d-swkbt_7941e190-b648-4b11-946b-dddaa1bc98d9/horizon/0.log" Nov 25 22:31:34 crc kubenswrapper[4910]: I1125 22:31:34.157831 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv_ef782e7e-3e6a-41ed-a9b1-343be0faecc3/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:34 crc kubenswrapper[4910]: I1125 
22:31:34.326613 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-657976db8d-swkbt_7941e190-b648-4b11-946b-dddaa1bc98d9/horizon-log/0.log" Nov 25 22:31:34 crc kubenswrapper[4910]: I1125 22:31:34.373300 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-sp4g4_941bb6aa-1438-4ed4-8ed3-3e834a784a79/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:34 crc kubenswrapper[4910]: I1125 22:31:34.567224 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401801-dfmbk_d759f7d3-5701-4d72-9df3-2509819d80f2/keystone-cron/0.log" Nov 25 22:31:34 crc kubenswrapper[4910]: I1125 22:31:34.777330 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_eaec4e2c-bb9a-4c1f-80d5-c93dce82233e/kube-state-metrics/0.log" Nov 25 22:31:34 crc kubenswrapper[4910]: I1125 22:31:34.814863 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-f7657d4c8-n2wbh_1fc29606-ff34-4170-859a-8357838d9b65/keystone-api/0.log" Nov 25 22:31:34 crc kubenswrapper[4910]: I1125 22:31:34.942686 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-h78fk_1132a133-2fdf-4a87-b132-d1f1c0a26c76/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:35 crc kubenswrapper[4910]: I1125 22:31:35.521417 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-667bcb4bc9-bl288_7935f6eb-171e-43a8-9f6c-6bf62769ade6/neutron-httpd/0.log" Nov 25 22:31:35 crc kubenswrapper[4910]: I1125 22:31:35.552200 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-667bcb4bc9-bl288_7935f6eb-171e-43a8-9f6c-6bf62769ade6/neutron-api/0.log" Nov 25 22:31:35 crc kubenswrapper[4910]: I1125 22:31:35.591189 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj_ed6d4c0f-684e-4174-ad4c-5f034025d52a/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:36 crc kubenswrapper[4910]: I1125 22:31:36.275602 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_533ec0e8-93b1-4468-8b7f-72071aa8be27/nova-cell0-conductor-conductor/0.log" Nov 25 22:31:36 crc kubenswrapper[4910]: I1125 22:31:36.277205 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_caf92f8f-f8b6-4214-8b76-13cfe6bafd4a/nova-api-log/0.log" Nov 25 22:31:36 crc kubenswrapper[4910]: I1125 22:31:36.523329 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_cc5eb389-8176-4989-8f45-a7a9631b286b/nova-cell1-conductor-conductor/0.log" Nov 25 22:31:36 crc kubenswrapper[4910]: I1125 22:31:36.535100 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_caf92f8f-f8b6-4214-8b76-13cfe6bafd4a/nova-api-api/0.log" Nov 25 22:31:36 crc kubenswrapper[4910]: I1125 22:31:36.657834 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_af1314f5-7ef2-46dd-b56d-3320375af199/nova-cell1-novncproxy-novncproxy/0.log" Nov 25 22:31:36 crc kubenswrapper[4910]: I1125 22:31:36.803263 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-sv2kk_0a190739-9d08-41b3-a45d-42d0b636ccad/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:36 crc kubenswrapper[4910]: 
I1125 22:31:36.966572 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_2b80da77-dd73-4886-bcd1-88fb1c484af1/nova-metadata-log/0.log" Nov 25 22:31:37 crc kubenswrapper[4910]: I1125 22:31:37.204071 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:31:37 crc kubenswrapper[4910]: E1125 22:31:37.204387 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:31:37 crc kubenswrapper[4910]: I1125 22:31:37.409311 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_8dba0933-64a2-4286-baee-149ebff5c09d/nova-scheduler-scheduler/0.log" Nov 25 22:31:37 crc kubenswrapper[4910]: I1125 22:31:37.434941 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_62283554-0498-4bac-b223-8d3c6d21b614/mysql-bootstrap/0.log" Nov 25 22:31:37 crc kubenswrapper[4910]: I1125 22:31:37.718964 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_62283554-0498-4bac-b223-8d3c6d21b614/mysql-bootstrap/0.log" Nov 25 22:31:37 crc kubenswrapper[4910]: I1125 22:31:37.736497 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_62283554-0498-4bac-b223-8d3c6d21b614/galera/0.log" Nov 25 22:31:37 crc kubenswrapper[4910]: I1125 22:31:37.928495 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc/mysql-bootstrap/0.log" Nov 25 22:31:38 crc kubenswrapper[4910]: I1125 22:31:38.228698 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc/mysql-bootstrap/0.log" Nov 25 22:31:38 crc kubenswrapper[4910]: I1125 22:31:38.268435 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc/galera/0.log" Nov 25 22:31:38 crc kubenswrapper[4910]: I1125 22:31:38.449645 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_e36f2312-81e7-4b57-9131-695681724f08/openstackclient/0.log" Nov 25 22:31:38 crc kubenswrapper[4910]: I1125 22:31:38.521286 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_2b80da77-dd73-4886-bcd1-88fb1c484af1/nova-metadata-metadata/0.log" Nov 25 22:31:38 crc kubenswrapper[4910]: I1125 22:31:38.599036 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-554dc_5d3afe23-a5d2-4f9c-bdaa-f80020ef6226/ovn-controller/0.log" Nov 25 22:31:38 crc kubenswrapper[4910]: I1125 22:31:38.791013 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-lz25t_d73b450a-c8fd-47c7-918c-273ae5d10b8a/openstack-network-exporter/0.log" Nov 25 22:31:38 crc kubenswrapper[4910]: I1125 22:31:38.900853 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dbkwd_d271e423-f378-4368-b055-d89cea058d38/ovsdb-server-init/0.log" Nov 25 22:31:39 crc kubenswrapper[4910]: I1125 22:31:39.371038 4910 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dbkwd_d271e423-f378-4368-b055-d89cea058d38/ovsdb-server-init/0.log" Nov 25 22:31:39 crc kubenswrapper[4910]: I1125 22:31:39.374848 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dbkwd_d271e423-f378-4368-b055-d89cea058d38/ovsdb-server/0.log" Nov 25 22:31:39 crc kubenswrapper[4910]: I1125 22:31:39.384018 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dbkwd_d271e423-f378-4368-b055-d89cea058d38/ovs-vswitchd/0.log" Nov 25 22:31:39 crc kubenswrapper[4910]: I1125 22:31:39.652394 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_944fb5f5-a2bc-4328-bbec-203fbfb6cd20/openstack-network-exporter/0.log" Nov 25 22:31:39 crc kubenswrapper[4910]: I1125 22:31:39.708285 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-p7pqv_0ab610e6-8ab3-4c4c-83fa-5ce52795f545/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:39 crc kubenswrapper[4910]: I1125 22:31:39.773919 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_944fb5f5-a2bc-4328-bbec-203fbfb6cd20/ovn-northd/0.log" Nov 25 22:31:39 crc kubenswrapper[4910]: I1125 22:31:39.917331 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d4285011-1eac-4f3c-af27-c6c6ad03d8de/ovsdbserver-nb/0.log" Nov 25 22:31:39 crc kubenswrapper[4910]: I1125 22:31:39.924124 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d4285011-1eac-4f3c-af27-c6c6ad03d8de/openstack-network-exporter/0.log" Nov 25 22:31:40 crc kubenswrapper[4910]: I1125 22:31:40.164069 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_d7e886f1-04bd-4061-9a6c-18a20a1d7cbe/openstack-network-exporter/0.log" Nov 25 22:31:40 crc kubenswrapper[4910]: I1125 22:31:40.199704 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_d7e886f1-04bd-4061-9a6c-18a20a1d7cbe/ovsdbserver-sb/0.log" Nov 25 22:31:40 crc kubenswrapper[4910]: I1125 22:31:40.473748 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-64ff96875d-p4n97_d7f10efc-4222-4871-b684-dc482fd27b01/placement-log/0.log" Nov 25 22:31:40 crc kubenswrapper[4910]: I1125 22:31:40.537744 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-64ff96875d-p4n97_d7f10efc-4222-4871-b684-dc482fd27b01/placement-api/0.log" Nov 25 22:31:40 crc kubenswrapper[4910]: I1125 22:31:40.567953 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bc2bbda0-2d3e-4794-bc13-21bca025c6fe/setup-container/0.log" Nov 25 22:31:40 crc kubenswrapper[4910]: I1125 22:31:40.734987 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bc2bbda0-2d3e-4794-bc13-21bca025c6fe/setup-container/0.log" Nov 25 22:31:40 crc kubenswrapper[4910]: I1125 22:31:40.832165 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9d06ec4c-6e1e-4fc9-9e41-59857b4494fd/setup-container/0.log" Nov 25 22:31:40 crc kubenswrapper[4910]: I1125 22:31:40.839897 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bc2bbda0-2d3e-4794-bc13-21bca025c6fe/rabbitmq/0.log" Nov 25 22:31:41 crc kubenswrapper[4910]: I1125 22:31:41.133131 4910 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9d06ec4c-6e1e-4fc9-9e41-59857b4494fd/setup-container/0.log" Nov 25 22:31:41 crc kubenswrapper[4910]: I1125 22:31:41.196654 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9d06ec4c-6e1e-4fc9-9e41-59857b4494fd/rabbitmq/0.log" Nov 25 22:31:41 crc kubenswrapper[4910]: I1125 22:31:41.256034 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp_c552b066-9a2f-46d0-9865-adaa8c454811/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:41 crc kubenswrapper[4910]: I1125 22:31:41.516970 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-4z9c2_444f986e-2346-419a-a78e-584196602880/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:41 crc kubenswrapper[4910]: I1125 22:31:41.607657 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv_9cd37c71-fc60-4099-a183-6f9e8a918e1e/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:41 crc kubenswrapper[4910]: I1125 22:31:41.737859 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-t7clg_77fc796b-aaee-4dad-a82d-464aaf60ab47/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:41 crc kubenswrapper[4910]: I1125 22:31:41.987695 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-r9mbm_475e4e94-21eb-40fb-8d3d-b5359cc77a88/ssh-known-hosts-edpm-deployment/0.log" Nov 25 22:31:42 crc kubenswrapper[4910]: I1125 22:31:42.118083 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6575886cb7-hv9qm_8028bd01-f5f2-4c20-9f51-c6a7e06571fd/proxy-httpd/0.log" Nov 25 22:31:42 crc kubenswrapper[4910]: I1125 22:31:42.121558 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6575886cb7-hv9qm_8028bd01-f5f2-4c20-9f51-c6a7e06571fd/proxy-server/0.log" Nov 25 22:31:42 crc kubenswrapper[4910]: I1125 22:31:42.325468 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-v7clx_5050ee25-88de-4888-ba01-fc11c71df0a1/swift-ring-rebalance/0.log" Nov 25 22:31:42 crc kubenswrapper[4910]: I1125 22:31:42.378251 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/account-auditor/0.log" Nov 25 22:31:42 crc kubenswrapper[4910]: I1125 22:31:42.492681 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/account-reaper/0.log" Nov 25 22:31:42 crc kubenswrapper[4910]: I1125 22:31:42.598027 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/account-replicator/0.log" Nov 25 22:31:42 crc kubenswrapper[4910]: I1125 22:31:42.647784 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/account-server/0.log" Nov 25 22:31:42 crc kubenswrapper[4910]: I1125 22:31:42.688025 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/container-auditor/0.log" Nov 25 22:31:42 crc kubenswrapper[4910]: I1125 22:31:42.989490 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/container-replicator/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.021121 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/container-server/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.100333 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/container-updater/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.168752 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/object-auditor/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.284411 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/object-expirer/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.298359 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/object-replicator/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.377744 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/object-server/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.460613 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/object-updater/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.498973 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/swift-recon-cron/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.542113 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/rsync/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.724798 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw_66309eee-ce32-4108-82f9-e96dbc03dc45/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.825058 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_c1d3df8e-e3e1-4065-8736-979a4abaec2c/tempest-tests-tempest-tests-runner/0.log" Nov 25 22:31:43 crc kubenswrapper[4910]: I1125 22:31:43.980176 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_2fc860e3-9496-49ee-8083-468eb806013d/test-operator-logs-container/0.log" Nov 25 22:31:44 crc kubenswrapper[4910]: I1125 22:31:44.069132 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj_d8581103-5144-4384-8d68-9160c64f6233/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:31:49 crc kubenswrapper[4910]: I1125 22:31:49.217440 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:31:49 crc kubenswrapper[4910]: E1125 22:31:49.219590 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:31:53 crc kubenswrapper[4910]: I1125 22:31:53.891426 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_c811f98e-8a72-406b-b0c3-35a7102dd46e/memcached/0.log" Nov 25 22:32:04 crc kubenswrapper[4910]: I1125 22:32:04.204954 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:32:04 crc kubenswrapper[4910]: E1125 22:32:04.206709 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:32:13 crc kubenswrapper[4910]: I1125 22:32:13.145494 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/util/0.log" Nov 25 22:32:13 crc kubenswrapper[4910]: I1125 22:32:13.644966 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/pull/0.log" Nov 25 22:32:13 crc kubenswrapper[4910]: I1125 22:32:13.664338 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/util/0.log" Nov 25 22:32:13 crc kubenswrapper[4910]: I1125 22:32:13.715092 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/pull/0.log" Nov 25 22:32:13 crc kubenswrapper[4910]: I1125 22:32:13.887207 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/util/0.log" Nov 25 22:32:13 crc kubenswrapper[4910]: I1125 22:32:13.966738 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/pull/0.log" Nov 25 22:32:13 crc kubenswrapper[4910]: I1125 22:32:13.975425 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/extract/0.log" Nov 25 22:32:14 crc kubenswrapper[4910]: I1125 22:32:14.132406 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-x58hx_22c915a2-80bf-454b-b0e6-7a5bbafec7a5/kube-rbac-proxy/0.log" Nov 25 22:32:14 crc kubenswrapper[4910]: I1125 22:32:14.162470 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-gvsd5_ef4b8019-398c-453d-9b78-71c340bf2bdd/kube-rbac-proxy/0.log" Nov 25 22:32:14 crc kubenswrapper[4910]: I1125 22:32:14.266476 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-x58hx_22c915a2-80bf-454b-b0e6-7a5bbafec7a5/manager/0.log" Nov 25 22:32:14 crc kubenswrapper[4910]: I1125 22:32:14.435425 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-gvsd5_ef4b8019-398c-453d-9b78-71c340bf2bdd/manager/0.log" Nov 25 22:32:14 crc kubenswrapper[4910]: I1125 22:32:14.482328 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-gbgd2_cd608fcb-14bd-424e-9f6e-c0eea37397ea/manager/0.log" Nov 25 22:32:14 crc kubenswrapper[4910]: I1125 22:32:14.509236 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-gbgd2_cd608fcb-14bd-424e-9f6e-c0eea37397ea/kube-rbac-proxy/0.log" Nov 25 22:32:14 crc kubenswrapper[4910]: I1125 22:32:14.682767 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-776b995c47-chsbs_bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9/kube-rbac-proxy/0.log" Nov 25 22:32:14 crc kubenswrapper[4910]: I1125 22:32:14.819909 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-776b995c47-chsbs_bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9/manager/0.log" Nov 25 22:32:14 crc kubenswrapper[4910]: I1125 22:32:14.922971 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-vwgmq_28574aa2-4470-4432-b7f0-4b3b52b5f8b9/kube-rbac-proxy/0.log" Nov 25 22:32:15 crc kubenswrapper[4910]: I1125 22:32:15.020996 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-vwgmq_28574aa2-4470-4432-b7f0-4b3b52b5f8b9/manager/0.log" Nov 25 22:32:15 crc kubenswrapper[4910]: I1125 22:32:15.087788 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-pqkz6_6c888013-ea9c-433c-973f-af7c5c22f8c9/kube-rbac-proxy/0.log" Nov 25 22:32:15 crc kubenswrapper[4910]: I1125 22:32:15.149270 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-pqkz6_6c888013-ea9c-433c-973f-af7c5c22f8c9/manager/0.log" Nov 25 22:32:15 crc kubenswrapper[4910]: I1125 22:32:15.276998 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-9mx4m_0345d3a7-45fa-4bce-8dcb-4bef18de4b21/kube-rbac-proxy/0.log" Nov 25 22:32:15 crc kubenswrapper[4910]: I1125 22:32:15.464551 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-9mx4m_0345d3a7-45fa-4bce-8dcb-4bef18de4b21/manager/0.log" Nov 25 22:32:15 crc kubenswrapper[4910]: I1125 22:32:15.602296 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-q9hg4_2cce005e-33cd-4b63-8798-b0b7eb53ba73/kube-rbac-proxy/0.log" Nov 25 22:32:15 crc kubenswrapper[4910]: I1125 22:32:15.605163 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-q9hg4_2cce005e-33cd-4b63-8798-b0b7eb53ba73/manager/0.log" Nov 25 22:32:15 crc kubenswrapper[4910]: I1125 22:32:15.756331 4910 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-546d4bdf48-4hf76_c27ac874-a062-4342-9559-a14acbff4c9d/kube-rbac-proxy/0.log" Nov 25 22:32:15 crc kubenswrapper[4910]: I1125 22:32:15.882617 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-546d4bdf48-4hf76_c27ac874-a062-4342-9559-a14acbff4c9d/manager/0.log" Nov 25 22:32:15 crc kubenswrapper[4910]: I1125 22:32:15.993736 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-z9dd7_5c6fa310-d85a-4ac3-be15-478635a8c221/kube-rbac-proxy/0.log" Nov 25 22:32:15 crc kubenswrapper[4910]: I1125 22:32:15.993997 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-z9dd7_5c6fa310-d85a-4ac3-be15-478635a8c221/manager/0.log" Nov 25 22:32:16 crc kubenswrapper[4910]: I1125 22:32:16.095963 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-tkpxw_44442e02-9c1f-4a6e-bcdd-237b8260638d/kube-rbac-proxy/0.log" Nov 25 22:32:16 crc kubenswrapper[4910]: I1125 22:32:16.224547 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-tkpxw_44442e02-9c1f-4a6e-bcdd-237b8260638d/manager/0.log" Nov 25 22:32:16 crc kubenswrapper[4910]: I1125 22:32:16.327476 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-t4qfc_eadae0d9-eee7-42f3-aa0e-c42ef3282f24/kube-rbac-proxy/0.log" Nov 25 22:32:16 crc kubenswrapper[4910]: I1125 22:32:16.390325 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-t4qfc_eadae0d9-eee7-42f3-aa0e-c42ef3282f24/manager/0.log" Nov 25 22:32:16 crc kubenswrapper[4910]: I1125 22:32:16.458996 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-555bbdd45-7f94x_ad233905-ebeb-4698-8261-d8a395be75d7/kube-rbac-proxy/0.log" Nov 25 22:32:16 crc kubenswrapper[4910]: I1125 22:32:16.638545 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-555bbdd45-7f94x_ad233905-ebeb-4698-8261-d8a395be75d7/manager/0.log" Nov 25 22:32:16 crc kubenswrapper[4910]: I1125 22:32:16.712268 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-czf4h_2c94f244-a036-47af-8ba4-5dfe41ad5e66/kube-rbac-proxy/0.log" Nov 25 22:32:16 crc kubenswrapper[4910]: I1125 22:32:16.779719 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-czf4h_2c94f244-a036-47af-8ba4-5dfe41ad5e66/manager/0.log" Nov 25 22:32:16 crc kubenswrapper[4910]: I1125 22:32:16.845269 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd_65280dcb-6ac6-443b-88f0-7d3b0dadb4f8/kube-rbac-proxy/0.log" Nov 25 22:32:16 crc kubenswrapper[4910]: I1125 22:32:16.933362 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd_65280dcb-6ac6-443b-88f0-7d3b0dadb4f8/manager/0.log" Nov 25 22:32:17 crc kubenswrapper[4910]: I1125 
22:32:17.406136 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5475b86485-8h9j5_207712f3-d06c-435f-9a0d-f6a895ee4578/operator/0.log" Nov 25 22:32:17 crc kubenswrapper[4910]: I1125 22:32:17.493526 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-7lqkm_a4af1c8b-9a29-47cc-aec2-501fe04e24fd/registry-server/0.log" Nov 25 22:32:17 crc kubenswrapper[4910]: I1125 22:32:17.572648 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6c4d7c9757-gpg9j_5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c/kube-rbac-proxy/0.log" Nov 25 22:32:17 crc kubenswrapper[4910]: I1125 22:32:17.784561 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6c4d7c9757-gpg9j_5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c/manager/0.log" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.032261 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-wsd5q_f3384730-e8d8-4e36-9f3e-8e2dbf3176cb/kube-rbac-proxy/0.log" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.146689 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-wsd5q_f3384730-e8d8-4e36-9f3e-8e2dbf3176cb/manager/0.log" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.205644 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:32:18 crc kubenswrapper[4910]: E1125 22:32:18.205859 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.299046 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-dvpfs_838225a6-f682-4181-aeab-073767c8d49a/operator/0.log" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.354680 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-758b84fd57-x2sxf_9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9/manager/0.log" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.405480 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-wc77r_ba6c852e-59d0-4e5a-8967-3502457d62ec/kube-rbac-proxy/0.log" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.574200 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-wc77r_ba6c852e-59d0-4e5a-8967-3502457d62ec/manager/0.log" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.621846 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-6zxdg_e117033c-b566-4c46-bd57-9e173e88a224/kube-rbac-proxy/0.log" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.776598 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-6zxdg_e117033c-b566-4c46-bd57-9e173e88a224/manager/0.log" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.869909 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-74fcfc6d4b-2jdvk_bb7d559d-1779-400f-b556-1adbb0c61b60/kube-rbac-proxy/0.log" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.889575 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-74fcfc6d4b-2jdvk_bb7d559d-1779-400f-b556-1adbb0c61b60/manager/0.log" Nov 25 22:32:18 crc kubenswrapper[4910]: I1125 22:32:18.996332 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-g9p8x_0ac69701-2b03-4d80-bb8f-8f46acb193e4/kube-rbac-proxy/0.log" Nov 25 22:32:19 crc kubenswrapper[4910]: I1125 22:32:19.067741 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-g9p8x_0ac69701-2b03-4d80-bb8f-8f46acb193e4/manager/0.log" Nov 25 22:32:31 crc kubenswrapper[4910]: I1125 22:32:31.204479 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:32:31 crc kubenswrapper[4910]: E1125 22:32:31.205448 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:32:41 crc kubenswrapper[4910]: I1125 22:32:41.100075 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-6ncxd_b8eb6262-2c30-4192-8936-9463698c361e/control-plane-machine-set-operator/0.log" Nov 25 22:32:41 crc kubenswrapper[4910]: I1125 22:32:41.376779 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-8gf7r_bc0f5871-442b-4fa3-863c-173c2df1ffd4/kube-rbac-proxy/0.log" Nov 25 22:32:41 crc kubenswrapper[4910]: I1125 22:32:41.381581 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-8gf7r_bc0f5871-442b-4fa3-863c-173c2df1ffd4/machine-api-operator/0.log" Nov 25 22:32:42 crc kubenswrapper[4910]: I1125 22:32:42.205477 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:32:42 crc kubenswrapper[4910]: E1125 22:32:42.206542 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:32:54 crc kubenswrapper[4910]: I1125 22:32:54.204230 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:32:54 crc kubenswrapper[4910]: E1125 22:32:54.205839 4910 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:32:55 crc kubenswrapper[4910]: I1125 22:32:55.596963 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-kl2mf_949d68d4-09e9-4d53-a0d6-0d667e0c7b09/cert-manager-controller/0.log" Nov 25 22:32:55 crc kubenswrapper[4910]: I1125 22:32:55.751437 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-fxdt9_a1017150-9116-4453-84f8-bc8148ee529e/cert-manager-cainjector/0.log" Nov 25 22:32:55 crc kubenswrapper[4910]: I1125 22:32:55.814822 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-xtxpk_ce2757a9-3fa1-4cf5-9ace-bc7cc1922640/cert-manager-webhook/0.log" Nov 25 22:33:07 crc kubenswrapper[4910]: I1125 22:33:07.204109 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:33:07 crc kubenswrapper[4910]: E1125 22:33:07.205290 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:33:10 crc kubenswrapper[4910]: I1125 22:33:10.115773 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-h5pzd_97f18c66-ca4f-40ce-8b4f-b43cd7a99690/nmstate-console-plugin/0.log" Nov 25 22:33:10 crc kubenswrapper[4910]: I1125 22:33:10.317579 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-2hz8s_27c220ba-4a63-4e7f-85f5-f1aa823b41cc/nmstate-handler/0.log" Nov 25 22:33:10 crc kubenswrapper[4910]: I1125 22:33:10.391521 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-m5hsc_db10171c-40c8-4bfd-88b8-c1bd80b4e37c/nmstate-metrics/0.log" Nov 25 22:33:10 crc kubenswrapper[4910]: I1125 22:33:10.392198 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-m5hsc_db10171c-40c8-4bfd-88b8-c1bd80b4e37c/kube-rbac-proxy/0.log" Nov 25 22:33:10 crc kubenswrapper[4910]: I1125 22:33:10.579460 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-bgkcs_3b2b7b01-5b19-471d-bec2-10f3182a21cd/nmstate-operator/0.log" Nov 25 22:33:10 crc kubenswrapper[4910]: I1125 22:33:10.630605 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-56dnp_f21c62ce-5e4c-4730-afa0-9d4ef734952f/nmstate-webhook/0.log" Nov 25 22:33:18 crc kubenswrapper[4910]: I1125 22:33:18.205344 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:33:18 crc kubenswrapper[4910]: E1125 22:33:18.206229 4910 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:33:27 crc kubenswrapper[4910]: I1125 22:33:27.909598 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-57bhp_8517a279-1eca-4be6-a4c0-09716207a094/kube-rbac-proxy/0.log" Nov 25 22:33:27 crc kubenswrapper[4910]: I1125 22:33:27.959292 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-57bhp_8517a279-1eca-4be6-a4c0-09716207a094/controller/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.134412 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-frr-files/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.288256 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-frr-files/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.298390 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-metrics/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.333597 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-reloader/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.358443 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-reloader/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.605320 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-reloader/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.626302 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-metrics/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.632498 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-metrics/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.648013 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-frr-files/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.817547 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-metrics/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.841169 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-frr-files/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.851855 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-reloader/0.log" Nov 25 22:33:28 crc kubenswrapper[4910]: I1125 22:33:28.913418 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/controller/0.log" Nov 25 22:33:29 crc kubenswrapper[4910]: I1125 22:33:29.073627 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/frr-metrics/0.log" Nov 25 22:33:29 crc kubenswrapper[4910]: I1125 22:33:29.166459 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/kube-rbac-proxy/0.log" Nov 25 22:33:29 crc kubenswrapper[4910]: I1125 22:33:29.209018 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/kube-rbac-proxy-frr/0.log" Nov 25 22:33:29 crc kubenswrapper[4910]: I1125 22:33:29.345757 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/reloader/0.log" Nov 25 22:33:29 crc kubenswrapper[4910]: I1125 22:33:29.498489 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-csqvk_5f6618c7-ba0f-45ce-a1f1-d42f55e72500/frr-k8s-webhook-server/0.log" Nov 25 22:33:29 crc kubenswrapper[4910]: I1125 22:33:29.934158 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-9c5c567bb-p4k28_6299e276-3b3c-4c65-abab-321a1129c175/manager/0.log" Nov 25 22:33:30 crc kubenswrapper[4910]: I1125 22:33:30.181225 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-747dcffbf8-gz64s_7cd2774b-6d1f-4fc6-811e-a13f715832ab/webhook-server/0.log" Nov 25 22:33:30 crc kubenswrapper[4910]: I1125 22:33:30.271747 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fmrxg_6dcf2928-8050-4c63-9035-35b85bb922ce/kube-rbac-proxy/0.log" Nov 25 22:33:30 crc kubenswrapper[4910]: I1125 22:33:30.327748 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/frr/0.log" Nov 25 22:33:30 crc kubenswrapper[4910]: I1125 22:33:30.796675 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fmrxg_6dcf2928-8050-4c63-9035-35b85bb922ce/speaker/0.log" Nov 25 22:33:33 crc kubenswrapper[4910]: I1125 22:33:33.205175 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:33:33 crc kubenswrapper[4910]: E1125 22:33:33.206667 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:33:47 crc kubenswrapper[4910]: I1125 22:33:47.204454 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:33:47 crc kubenswrapper[4910]: E1125 22:33:47.206721 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:33:47 crc kubenswrapper[4910]: I1125 22:33:47.348493 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/util/0.log" Nov 25 22:33:47 crc kubenswrapper[4910]: I1125 22:33:47.542308 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/util/0.log" Nov 25 22:33:47 crc kubenswrapper[4910]: I1125 22:33:47.566213 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/pull/0.log" Nov 25 22:33:47 crc kubenswrapper[4910]: I1125 22:33:47.598136 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/pull/0.log" Nov 25 22:33:47 crc kubenswrapper[4910]: I1125 22:33:47.786924 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/util/0.log" Nov 25 22:33:47 crc kubenswrapper[4910]: I1125 22:33:47.801629 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/pull/0.log" Nov 25 22:33:47 crc kubenswrapper[4910]: I1125 22:33:47.843494 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/extract/0.log" Nov 25 22:33:48 crc kubenswrapper[4910]: I1125 22:33:48.754489 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-utilities/0.log" Nov 25 22:33:48 crc kubenswrapper[4910]: I1125 22:33:48.971274 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-content/0.log" Nov 25 22:33:48 crc kubenswrapper[4910]: I1125 22:33:48.980770 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-content/0.log" Nov 25 22:33:49 crc kubenswrapper[4910]: I1125 22:33:49.006826 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-utilities/0.log" Nov 25 22:33:49 crc kubenswrapper[4910]: I1125 22:33:49.230327 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-utilities/0.log" Nov 25 22:33:49 crc kubenswrapper[4910]: I1125 22:33:49.310258 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-content/0.log" Nov 25 22:33:49 crc kubenswrapper[4910]: I1125 22:33:49.510331 4910 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-utilities/0.log" Nov 25 22:33:49 crc kubenswrapper[4910]: I1125 22:33:49.949042 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-content/0.log" Nov 25 22:33:49 crc kubenswrapper[4910]: I1125 22:33:49.959632 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-utilities/0.log" Nov 25 22:33:49 crc kubenswrapper[4910]: I1125 22:33:49.988731 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-content/0.log" Nov 25 22:33:50 crc kubenswrapper[4910]: I1125 22:33:50.313745 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/registry-server/0.log" Nov 25 22:33:50 crc kubenswrapper[4910]: I1125 22:33:50.325131 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-utilities/0.log" Nov 25 22:33:50 crc kubenswrapper[4910]: I1125 22:33:50.372878 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-content/0.log" Nov 25 22:33:50 crc kubenswrapper[4910]: I1125 22:33:50.540547 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/util/0.log" Nov 25 22:33:50 crc kubenswrapper[4910]: I1125 22:33:50.829863 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/pull/0.log" Nov 25 22:33:50 crc kubenswrapper[4910]: I1125 22:33:50.889613 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/util/0.log" Nov 25 22:33:50 crc kubenswrapper[4910]: I1125 22:33:50.893620 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/pull/0.log" Nov 25 22:33:50 crc kubenswrapper[4910]: I1125 22:33:50.930006 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/registry-server/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.085506 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/util/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.104914 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/pull/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.133634 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/extract/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.197552 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-smb6m_e2af0945-04e5-4220-981f-d7a4892fcf69/marketplace-operator/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.326671 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-utilities/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.510460 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-content/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.510723 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-utilities/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.545713 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-content/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.729851 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-utilities/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.745114 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-content/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.796212 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-utilities/0.log" Nov 25 22:33:51 crc kubenswrapper[4910]: I1125 22:33:51.959900 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/registry-server/0.log" Nov 25 22:33:52 crc kubenswrapper[4910]: I1125 22:33:52.055742 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-content/0.log" Nov 25 22:33:52 crc kubenswrapper[4910]: I1125 22:33:52.058108 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-content/0.log" Nov 25 22:33:52 crc kubenswrapper[4910]: I1125 22:33:52.064938 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-utilities/0.log" Nov 25 22:33:52 crc kubenswrapper[4910]: I1125 22:33:52.230974 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-content/0.log" Nov 25 22:33:52 crc kubenswrapper[4910]: I1125 22:33:52.249512 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-utilities/0.log" Nov 25 22:33:52 crc kubenswrapper[4910]: I1125 22:33:52.367770 4910 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/registry-server/0.log" Nov 25 22:34:02 crc kubenswrapper[4910]: I1125 22:34:02.206416 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:34:02 crc kubenswrapper[4910]: E1125 22:34:02.208033 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:34:13 crc kubenswrapper[4910]: I1125 22:34:13.206537 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:34:13 crc kubenswrapper[4910]: E1125 22:34:13.207701 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:34:26 crc kubenswrapper[4910]: I1125 22:34:26.204877 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:34:26 crc kubenswrapper[4910]: E1125 22:34:26.205794 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:34:27 crc kubenswrapper[4910]: E1125 22:34:27.586531 4910 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.142:59576->38.102.83.142:46763: write tcp 38.102.83.142:59576->38.102.83.142:46763: write: broken pipe Nov 25 22:34:40 crc kubenswrapper[4910]: I1125 22:34:40.205077 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:34:40 crc kubenswrapper[4910]: E1125 22:34:40.209432 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:34:54 crc kubenswrapper[4910]: I1125 22:34:54.204891 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:34:55 crc kubenswrapper[4910]: I1125 22:34:55.541562 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" 
event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"ba9869401faf0daa1e3b20ff8fe3e39564363a9833a4e10f10eec05aeff7e2b2"} Nov 25 22:35:40 crc kubenswrapper[4910]: I1125 22:35:40.196614 4910 generic.go:334] "Generic (PLEG): container finished" podID="9de7ad82-ff82-47a7-9393-cf5ce90f749e" containerID="3900b6394b9685f236a3b034b29aa8aa14b3f1920d76304c688685f5d3915607" exitCode=0 Nov 25 22:35:40 crc kubenswrapper[4910]: I1125 22:35:40.196741 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lqrwm/must-gather-fnf5s" event={"ID":"9de7ad82-ff82-47a7-9393-cf5ce90f749e","Type":"ContainerDied","Data":"3900b6394b9685f236a3b034b29aa8aa14b3f1920d76304c688685f5d3915607"} Nov 25 22:35:40 crc kubenswrapper[4910]: I1125 22:35:40.198867 4910 scope.go:117] "RemoveContainer" containerID="3900b6394b9685f236a3b034b29aa8aa14b3f1920d76304c688685f5d3915607" Nov 25 22:35:40 crc kubenswrapper[4910]: I1125 22:35:40.605961 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-lqrwm_must-gather-fnf5s_9de7ad82-ff82-47a7-9393-cf5ce90f749e/gather/0.log" Nov 25 22:35:49 crc kubenswrapper[4910]: I1125 22:35:49.874761 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-lqrwm/must-gather-fnf5s"] Nov 25 22:35:49 crc kubenswrapper[4910]: I1125 22:35:49.875888 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-lqrwm/must-gather-fnf5s" podUID="9de7ad82-ff82-47a7-9393-cf5ce90f749e" containerName="copy" containerID="cri-o://6efac0be1c740c8bdfcef85c95345ae2cbbfd6cbc1a059efa126f7a925939951" gracePeriod=2 Nov 25 22:35:49 crc kubenswrapper[4910]: I1125 22:35:49.886319 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-lqrwm/must-gather-fnf5s"] Nov 25 22:35:50 crc kubenswrapper[4910]: I1125 22:35:50.310135 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-lqrwm_must-gather-fnf5s_9de7ad82-ff82-47a7-9393-cf5ce90f749e/copy/0.log" Nov 25 22:35:50 crc kubenswrapper[4910]: I1125 22:35:50.311403 4910 generic.go:334] "Generic (PLEG): container finished" podID="9de7ad82-ff82-47a7-9393-cf5ce90f749e" containerID="6efac0be1c740c8bdfcef85c95345ae2cbbfd6cbc1a059efa126f7a925939951" exitCode=143 Nov 25 22:35:50 crc kubenswrapper[4910]: I1125 22:35:50.776884 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-lqrwm_must-gather-fnf5s_9de7ad82-ff82-47a7-9393-cf5ce90f749e/copy/0.log" Nov 25 22:35:50 crc kubenswrapper[4910]: I1125 22:35:50.777592 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lqrwm/must-gather-fnf5s" Nov 25 22:35:50 crc kubenswrapper[4910]: I1125 22:35:50.933635 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bbxsv\" (UniqueName: \"kubernetes.io/projected/9de7ad82-ff82-47a7-9393-cf5ce90f749e-kube-api-access-bbxsv\") pod \"9de7ad82-ff82-47a7-9393-cf5ce90f749e\" (UID: \"9de7ad82-ff82-47a7-9393-cf5ce90f749e\") " Nov 25 22:35:50 crc kubenswrapper[4910]: I1125 22:35:50.933754 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9de7ad82-ff82-47a7-9393-cf5ce90f749e-must-gather-output\") pod \"9de7ad82-ff82-47a7-9393-cf5ce90f749e\" (UID: \"9de7ad82-ff82-47a7-9393-cf5ce90f749e\") " Nov 25 22:35:50 crc kubenswrapper[4910]: I1125 22:35:50.958815 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9de7ad82-ff82-47a7-9393-cf5ce90f749e-kube-api-access-bbxsv" (OuterVolumeSpecName: "kube-api-access-bbxsv") pod "9de7ad82-ff82-47a7-9393-cf5ce90f749e" (UID: "9de7ad82-ff82-47a7-9393-cf5ce90f749e"). InnerVolumeSpecName "kube-api-access-bbxsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:35:51 crc kubenswrapper[4910]: I1125 22:35:51.036776 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bbxsv\" (UniqueName: \"kubernetes.io/projected/9de7ad82-ff82-47a7-9393-cf5ce90f749e-kube-api-access-bbxsv\") on node \"crc\" DevicePath \"\"" Nov 25 22:35:51 crc kubenswrapper[4910]: I1125 22:35:51.142892 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9de7ad82-ff82-47a7-9393-cf5ce90f749e-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "9de7ad82-ff82-47a7-9393-cf5ce90f749e" (UID: "9de7ad82-ff82-47a7-9393-cf5ce90f749e"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:35:51 crc kubenswrapper[4910]: I1125 22:35:51.215974 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9de7ad82-ff82-47a7-9393-cf5ce90f749e" path="/var/lib/kubelet/pods/9de7ad82-ff82-47a7-9393-cf5ce90f749e/volumes" Nov 25 22:35:51 crc kubenswrapper[4910]: I1125 22:35:51.241142 4910 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9de7ad82-ff82-47a7-9393-cf5ce90f749e-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 25 22:35:51 crc kubenswrapper[4910]: I1125 22:35:51.321661 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-lqrwm_must-gather-fnf5s_9de7ad82-ff82-47a7-9393-cf5ce90f749e/copy/0.log" Nov 25 22:35:51 crc kubenswrapper[4910]: I1125 22:35:51.322472 4910 scope.go:117] "RemoveContainer" containerID="6efac0be1c740c8bdfcef85c95345ae2cbbfd6cbc1a059efa126f7a925939951" Nov 25 22:35:51 crc kubenswrapper[4910]: I1125 22:35:51.322556 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lqrwm/must-gather-fnf5s" Nov 25 22:35:51 crc kubenswrapper[4910]: I1125 22:35:51.357519 4910 scope.go:117] "RemoveContainer" containerID="3900b6394b9685f236a3b034b29aa8aa14b3f1920d76304c688685f5d3915607" Nov 25 22:37:12 crc kubenswrapper[4910]: I1125 22:37:12.903424 4910 scope.go:117] "RemoveContainer" containerID="1dbe301091e89d355a7757858dfbaa82c96bd8bf45f405fd338f64adc9c41302" Nov 25 22:37:12 crc kubenswrapper[4910]: I1125 22:37:12.936023 4910 scope.go:117] "RemoveContainer" containerID="a0fe97f40192cfa5401b2f79e212022ae73459b3f984e276f4adca17e93d0c50" Nov 25 22:37:23 crc kubenswrapper[4910]: I1125 22:37:23.098553 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:37:23 crc kubenswrapper[4910]: I1125 22:37:23.099232 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:37:53 crc kubenswrapper[4910]: I1125 22:37:53.099968 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:37:53 crc kubenswrapper[4910]: I1125 22:37:53.101134 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:38:23 crc kubenswrapper[4910]: I1125 22:38:23.099217 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:38:23 crc kubenswrapper[4910]: I1125 22:38:23.102791 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:38:23 crc kubenswrapper[4910]: I1125 22:38:23.103644 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" Nov 25 22:38:23 crc kubenswrapper[4910]: I1125 22:38:23.105950 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ba9869401faf0daa1e3b20ff8fe3e39564363a9833a4e10f10eec05aeff7e2b2"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 25 22:38:23 crc kubenswrapper[4910]: I1125 
22:38:23.106173 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://ba9869401faf0daa1e3b20ff8fe3e39564363a9833a4e10f10eec05aeff7e2b2" gracePeriod=600 Nov 25 22:38:23 crc kubenswrapper[4910]: I1125 22:38:23.358858 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="ba9869401faf0daa1e3b20ff8fe3e39564363a9833a4e10f10eec05aeff7e2b2" exitCode=0 Nov 25 22:38:23 crc kubenswrapper[4910]: I1125 22:38:23.359051 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"ba9869401faf0daa1e3b20ff8fe3e39564363a9833a4e10f10eec05aeff7e2b2"} Nov 25 22:38:23 crc kubenswrapper[4910]: I1125 22:38:23.359297 4910 scope.go:117] "RemoveContainer" containerID="ee9f0067879a5a9b60f6b721eaae7a4cd84234590cfa9bcb12bcb54bca3bdc12" Nov 25 22:38:24 crc kubenswrapper[4910]: I1125 22:38:24.376738 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"} Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.829095 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-4srbz/must-gather-5zv5t"] Nov 25 22:38:29 crc kubenswrapper[4910]: E1125 22:38:29.830158 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9de7ad82-ff82-47a7-9393-cf5ce90f749e" containerName="gather" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.830173 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9de7ad82-ff82-47a7-9393-cf5ce90f749e" containerName="gather" Nov 25 22:38:29 crc kubenswrapper[4910]: E1125 22:38:29.830196 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9de7ad82-ff82-47a7-9393-cf5ce90f749e" containerName="copy" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.830201 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="9de7ad82-ff82-47a7-9393-cf5ce90f749e" containerName="copy" Nov 25 22:38:29 crc kubenswrapper[4910]: E1125 22:38:29.830209 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d5c098b-e243-4068-9e49-bc557af194f6" containerName="container-00" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.830216 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d5c098b-e243-4068-9e49-bc557af194f6" containerName="container-00" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.831106 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9de7ad82-ff82-47a7-9393-cf5ce90f749e" containerName="gather" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.831127 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="9de7ad82-ff82-47a7-9393-cf5ce90f749e" containerName="copy" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.831150 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d5c098b-e243-4068-9e49-bc557af194f6" containerName="container-00" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.832280 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-4srbz/must-gather-5zv5t" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.835182 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-4srbz"/"openshift-service-ca.crt" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.835450 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-4srbz"/"kube-root-ca.crt" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.837430 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-4srbz"/"default-dockercfg-nn5bz" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.853319 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb4j4\" (UniqueName: \"kubernetes.io/projected/a9e9b562-e71e-40a8-8cfc-7f9564f635db-kube-api-access-pb4j4\") pod \"must-gather-5zv5t\" (UID: \"a9e9b562-e71e-40a8-8cfc-7f9564f635db\") " pod="openshift-must-gather-4srbz/must-gather-5zv5t" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.853398 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a9e9b562-e71e-40a8-8cfc-7f9564f635db-must-gather-output\") pod \"must-gather-5zv5t\" (UID: \"a9e9b562-e71e-40a8-8cfc-7f9564f635db\") " pod="openshift-must-gather-4srbz/must-gather-5zv5t" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.860627 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-4srbz/must-gather-5zv5t"] Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.955778 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb4j4\" (UniqueName: \"kubernetes.io/projected/a9e9b562-e71e-40a8-8cfc-7f9564f635db-kube-api-access-pb4j4\") pod \"must-gather-5zv5t\" (UID: \"a9e9b562-e71e-40a8-8cfc-7f9564f635db\") " pod="openshift-must-gather-4srbz/must-gather-5zv5t" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.955992 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a9e9b562-e71e-40a8-8cfc-7f9564f635db-must-gather-output\") pod \"must-gather-5zv5t\" (UID: \"a9e9b562-e71e-40a8-8cfc-7f9564f635db\") " pod="openshift-must-gather-4srbz/must-gather-5zv5t" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.956545 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a9e9b562-e71e-40a8-8cfc-7f9564f635db-must-gather-output\") pod \"must-gather-5zv5t\" (UID: \"a9e9b562-e71e-40a8-8cfc-7f9564f635db\") " pod="openshift-must-gather-4srbz/must-gather-5zv5t" Nov 25 22:38:29 crc kubenswrapper[4910]: I1125 22:38:29.984087 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb4j4\" (UniqueName: \"kubernetes.io/projected/a9e9b562-e71e-40a8-8cfc-7f9564f635db-kube-api-access-pb4j4\") pod \"must-gather-5zv5t\" (UID: \"a9e9b562-e71e-40a8-8cfc-7f9564f635db\") " pod="openshift-must-gather-4srbz/must-gather-5zv5t" Nov 25 22:38:30 crc kubenswrapper[4910]: I1125 22:38:30.164770 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-4srbz/must-gather-5zv5t" Nov 25 22:38:31 crc kubenswrapper[4910]: I1125 22:38:30.689184 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-4srbz/must-gather-5zv5t"] Nov 25 22:38:31 crc kubenswrapper[4910]: I1125 22:38:31.469958 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4srbz/must-gather-5zv5t" event={"ID":"a9e9b562-e71e-40a8-8cfc-7f9564f635db","Type":"ContainerStarted","Data":"365d183e3c7f847fe0399b0fcaf3687761b1da201282feb3e6a3feba7e85a2bb"} Nov 25 22:38:31 crc kubenswrapper[4910]: I1125 22:38:31.471057 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4srbz/must-gather-5zv5t" event={"ID":"a9e9b562-e71e-40a8-8cfc-7f9564f635db","Type":"ContainerStarted","Data":"fe6b8e2cfb5993a4eca343957b8154e77df17a01cb6a0d32a7df22810b2bb53a"} Nov 25 22:38:31 crc kubenswrapper[4910]: I1125 22:38:31.471079 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4srbz/must-gather-5zv5t" event={"ID":"a9e9b562-e71e-40a8-8cfc-7f9564f635db","Type":"ContainerStarted","Data":"5b8290de364e3dbd11902f10269ca5cbec56db2f44a540050744dc3a883694ae"} Nov 25 22:38:31 crc kubenswrapper[4910]: I1125 22:38:31.494162 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-4srbz/must-gather-5zv5t" podStartSLOduration=2.494135367 podStartE2EDuration="2.494135367s" podCreationTimestamp="2025-11-25 22:38:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 22:38:31.491205589 +0000 UTC m=+4066.953681941" watchObservedRunningTime="2025-11-25 22:38:31.494135367 +0000 UTC m=+4066.956611689" Nov 25 22:38:35 crc kubenswrapper[4910]: I1125 22:38:35.225775 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-4srbz/crc-debug-wd7vd"] Nov 25 22:38:35 crc kubenswrapper[4910]: I1125 22:38:35.228880 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-wd7vd" Nov 25 22:38:35 crc kubenswrapper[4910]: I1125 22:38:35.287200 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/420be706-fdb7-47c0-aa06-e392dba08cf1-host\") pod \"crc-debug-wd7vd\" (UID: \"420be706-fdb7-47c0-aa06-e392dba08cf1\") " pod="openshift-must-gather-4srbz/crc-debug-wd7vd" Nov 25 22:38:35 crc kubenswrapper[4910]: I1125 22:38:35.287670 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4fp5k\" (UniqueName: \"kubernetes.io/projected/420be706-fdb7-47c0-aa06-e392dba08cf1-kube-api-access-4fp5k\") pod \"crc-debug-wd7vd\" (UID: \"420be706-fdb7-47c0-aa06-e392dba08cf1\") " pod="openshift-must-gather-4srbz/crc-debug-wd7vd" Nov 25 22:38:35 crc kubenswrapper[4910]: I1125 22:38:35.389589 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/420be706-fdb7-47c0-aa06-e392dba08cf1-host\") pod \"crc-debug-wd7vd\" (UID: \"420be706-fdb7-47c0-aa06-e392dba08cf1\") " pod="openshift-must-gather-4srbz/crc-debug-wd7vd" Nov 25 22:38:35 crc kubenswrapper[4910]: I1125 22:38:35.389765 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4fp5k\" (UniqueName: \"kubernetes.io/projected/420be706-fdb7-47c0-aa06-e392dba08cf1-kube-api-access-4fp5k\") pod \"crc-debug-wd7vd\" (UID: \"420be706-fdb7-47c0-aa06-e392dba08cf1\") " pod="openshift-must-gather-4srbz/crc-debug-wd7vd" Nov 25 22:38:35 crc kubenswrapper[4910]: I1125 22:38:35.389779 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/420be706-fdb7-47c0-aa06-e392dba08cf1-host\") pod \"crc-debug-wd7vd\" (UID: \"420be706-fdb7-47c0-aa06-e392dba08cf1\") " pod="openshift-must-gather-4srbz/crc-debug-wd7vd" Nov 25 22:38:35 crc kubenswrapper[4910]: I1125 22:38:35.413791 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4fp5k\" (UniqueName: \"kubernetes.io/projected/420be706-fdb7-47c0-aa06-e392dba08cf1-kube-api-access-4fp5k\") pod \"crc-debug-wd7vd\" (UID: \"420be706-fdb7-47c0-aa06-e392dba08cf1\") " pod="openshift-must-gather-4srbz/crc-debug-wd7vd" Nov 25 22:38:35 crc kubenswrapper[4910]: I1125 22:38:35.554194 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-wd7vd" Nov 25 22:38:35 crc kubenswrapper[4910]: W1125 22:38:35.592971 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod420be706_fdb7_47c0_aa06_e392dba08cf1.slice/crio-2c0c9bb00f6c828b4af9af50c9a71a913c30254742900abe5f3c06c6cb6699cb WatchSource:0}: Error finding container 2c0c9bb00f6c828b4af9af50c9a71a913c30254742900abe5f3c06c6cb6699cb: Status 404 returned error can't find the container with id 2c0c9bb00f6c828b4af9af50c9a71a913c30254742900abe5f3c06c6cb6699cb Nov 25 22:38:36 crc kubenswrapper[4910]: I1125 22:38:36.545389 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4srbz/crc-debug-wd7vd" event={"ID":"420be706-fdb7-47c0-aa06-e392dba08cf1","Type":"ContainerStarted","Data":"4887eae438c09d44692bb940069f937a1b6ff0f004d524b0632d25210d944b31"} Nov 25 22:38:36 crc kubenswrapper[4910]: I1125 22:38:36.546225 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4srbz/crc-debug-wd7vd" event={"ID":"420be706-fdb7-47c0-aa06-e392dba08cf1","Type":"ContainerStarted","Data":"2c0c9bb00f6c828b4af9af50c9a71a913c30254742900abe5f3c06c6cb6699cb"} Nov 25 22:38:36 crc kubenswrapper[4910]: I1125 22:38:36.601660 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-4srbz/crc-debug-wd7vd" podStartSLOduration=1.601630948 podStartE2EDuration="1.601630948s" podCreationTimestamp="2025-11-25 22:38:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-25 22:38:36.588542439 +0000 UTC m=+4072.051018761" watchObservedRunningTime="2025-11-25 22:38:36.601630948 +0000 UTC m=+4072.064107280" Nov 25 22:39:10 crc kubenswrapper[4910]: I1125 22:39:10.910718 4910 generic.go:334] "Generic (PLEG): container finished" podID="420be706-fdb7-47c0-aa06-e392dba08cf1" containerID="4887eae438c09d44692bb940069f937a1b6ff0f004d524b0632d25210d944b31" exitCode=0 Nov 25 22:39:10 crc kubenswrapper[4910]: I1125 22:39:10.911441 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4srbz/crc-debug-wd7vd" event={"ID":"420be706-fdb7-47c0-aa06-e392dba08cf1","Type":"ContainerDied","Data":"4887eae438c09d44692bb940069f937a1b6ff0f004d524b0632d25210d944b31"} Nov 25 22:39:12 crc kubenswrapper[4910]: I1125 22:39:12.036852 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-wd7vd" Nov 25 22:39:12 crc kubenswrapper[4910]: I1125 22:39:12.079595 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-4srbz/crc-debug-wd7vd"] Nov 25 22:39:12 crc kubenswrapper[4910]: I1125 22:39:12.088469 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-4srbz/crc-debug-wd7vd"] Nov 25 22:39:12 crc kubenswrapper[4910]: I1125 22:39:12.152048 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/420be706-fdb7-47c0-aa06-e392dba08cf1-host\") pod \"420be706-fdb7-47c0-aa06-e392dba08cf1\" (UID: \"420be706-fdb7-47c0-aa06-e392dba08cf1\") " Nov 25 22:39:12 crc kubenswrapper[4910]: I1125 22:39:12.152130 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4fp5k\" (UniqueName: \"kubernetes.io/projected/420be706-fdb7-47c0-aa06-e392dba08cf1-kube-api-access-4fp5k\") pod \"420be706-fdb7-47c0-aa06-e392dba08cf1\" (UID: \"420be706-fdb7-47c0-aa06-e392dba08cf1\") " Nov 25 22:39:12 crc kubenswrapper[4910]: I1125 22:39:12.152217 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/420be706-fdb7-47c0-aa06-e392dba08cf1-host" (OuterVolumeSpecName: "host") pod "420be706-fdb7-47c0-aa06-e392dba08cf1" (UID: "420be706-fdb7-47c0-aa06-e392dba08cf1"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 22:39:12 crc kubenswrapper[4910]: I1125 22:39:12.152745 4910 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/420be706-fdb7-47c0-aa06-e392dba08cf1-host\") on node \"crc\" DevicePath \"\"" Nov 25 22:39:12 crc kubenswrapper[4910]: I1125 22:39:12.162964 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/420be706-fdb7-47c0-aa06-e392dba08cf1-kube-api-access-4fp5k" (OuterVolumeSpecName: "kube-api-access-4fp5k") pod "420be706-fdb7-47c0-aa06-e392dba08cf1" (UID: "420be706-fdb7-47c0-aa06-e392dba08cf1"). InnerVolumeSpecName "kube-api-access-4fp5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:39:12 crc kubenswrapper[4910]: I1125 22:39:12.255495 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4fp5k\" (UniqueName: \"kubernetes.io/projected/420be706-fdb7-47c0-aa06-e392dba08cf1-kube-api-access-4fp5k\") on node \"crc\" DevicePath \"\"" Nov 25 22:39:12 crc kubenswrapper[4910]: I1125 22:39:12.932576 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2c0c9bb00f6c828b4af9af50c9a71a913c30254742900abe5f3c06c6cb6699cb" Nov 25 22:39:12 crc kubenswrapper[4910]: I1125 22:39:12.932664 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-wd7vd" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.222589 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="420be706-fdb7-47c0-aa06-e392dba08cf1" path="/var/lib/kubelet/pods/420be706-fdb7-47c0-aa06-e392dba08cf1/volumes" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.312692 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-4srbz/crc-debug-kp7nc"] Nov 25 22:39:13 crc kubenswrapper[4910]: E1125 22:39:13.313311 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="420be706-fdb7-47c0-aa06-e392dba08cf1" containerName="container-00" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.313336 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="420be706-fdb7-47c0-aa06-e392dba08cf1" containerName="container-00" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.313569 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="420be706-fdb7-47c0-aa06-e392dba08cf1" containerName="container-00" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.314427 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-kp7nc" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.481394 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6fdf5604-f06d-4b71-8a04-96b8e0475373-host\") pod \"crc-debug-kp7nc\" (UID: \"6fdf5604-f06d-4b71-8a04-96b8e0475373\") " pod="openshift-must-gather-4srbz/crc-debug-kp7nc" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.481722 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8qtv\" (UniqueName: \"kubernetes.io/projected/6fdf5604-f06d-4b71-8a04-96b8e0475373-kube-api-access-v8qtv\") pod \"crc-debug-kp7nc\" (UID: \"6fdf5604-f06d-4b71-8a04-96b8e0475373\") " pod="openshift-must-gather-4srbz/crc-debug-kp7nc" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.583888 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6fdf5604-f06d-4b71-8a04-96b8e0475373-host\") pod \"crc-debug-kp7nc\" (UID: \"6fdf5604-f06d-4b71-8a04-96b8e0475373\") " pod="openshift-must-gather-4srbz/crc-debug-kp7nc" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.584019 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8qtv\" (UniqueName: \"kubernetes.io/projected/6fdf5604-f06d-4b71-8a04-96b8e0475373-kube-api-access-v8qtv\") pod \"crc-debug-kp7nc\" (UID: \"6fdf5604-f06d-4b71-8a04-96b8e0475373\") " pod="openshift-must-gather-4srbz/crc-debug-kp7nc" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.584126 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6fdf5604-f06d-4b71-8a04-96b8e0475373-host\") pod \"crc-debug-kp7nc\" (UID: \"6fdf5604-f06d-4b71-8a04-96b8e0475373\") " pod="openshift-must-gather-4srbz/crc-debug-kp7nc" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.614462 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8qtv\" (UniqueName: \"kubernetes.io/projected/6fdf5604-f06d-4b71-8a04-96b8e0475373-kube-api-access-v8qtv\") pod \"crc-debug-kp7nc\" (UID: \"6fdf5604-f06d-4b71-8a04-96b8e0475373\") " 
pod="openshift-must-gather-4srbz/crc-debug-kp7nc" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.640928 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-kp7nc" Nov 25 22:39:13 crc kubenswrapper[4910]: I1125 22:39:13.957052 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4srbz/crc-debug-kp7nc" event={"ID":"6fdf5604-f06d-4b71-8a04-96b8e0475373","Type":"ContainerStarted","Data":"a6e66d98ab6ad8584dbec2f82c9794fa20ea9c3a588b88e50903f09eae3a3bf6"} Nov 25 22:39:14 crc kubenswrapper[4910]: I1125 22:39:14.988853 4910 generic.go:334] "Generic (PLEG): container finished" podID="6fdf5604-f06d-4b71-8a04-96b8e0475373" containerID="5a902b4cab2998513adcecaa23d3d7d13f2061d7e63332f967f63757849c9846" exitCode=0 Nov 25 22:39:14 crc kubenswrapper[4910]: I1125 22:39:14.989449 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4srbz/crc-debug-kp7nc" event={"ID":"6fdf5604-f06d-4b71-8a04-96b8e0475373","Type":"ContainerDied","Data":"5a902b4cab2998513adcecaa23d3d7d13f2061d7e63332f967f63757849c9846"} Nov 25 22:39:15 crc kubenswrapper[4910]: I1125 22:39:15.506999 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-4srbz/crc-debug-kp7nc"] Nov 25 22:39:15 crc kubenswrapper[4910]: I1125 22:39:15.516007 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-4srbz/crc-debug-kp7nc"] Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.115966 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-kp7nc" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.244154 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6fdf5604-f06d-4b71-8a04-96b8e0475373-host\") pod \"6fdf5604-f06d-4b71-8a04-96b8e0475373\" (UID: \"6fdf5604-f06d-4b71-8a04-96b8e0475373\") " Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.244291 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6fdf5604-f06d-4b71-8a04-96b8e0475373-host" (OuterVolumeSpecName: "host") pod "6fdf5604-f06d-4b71-8a04-96b8e0475373" (UID: "6fdf5604-f06d-4b71-8a04-96b8e0475373"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.244398 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8qtv\" (UniqueName: \"kubernetes.io/projected/6fdf5604-f06d-4b71-8a04-96b8e0475373-kube-api-access-v8qtv\") pod \"6fdf5604-f06d-4b71-8a04-96b8e0475373\" (UID: \"6fdf5604-f06d-4b71-8a04-96b8e0475373\") " Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.245261 4910 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/6fdf5604-f06d-4b71-8a04-96b8e0475373-host\") on node \"crc\" DevicePath \"\"" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.251730 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fdf5604-f06d-4b71-8a04-96b8e0475373-kube-api-access-v8qtv" (OuterVolumeSpecName: "kube-api-access-v8qtv") pod "6fdf5604-f06d-4b71-8a04-96b8e0475373" (UID: "6fdf5604-f06d-4b71-8a04-96b8e0475373"). InnerVolumeSpecName "kube-api-access-v8qtv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.347068 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8qtv\" (UniqueName: \"kubernetes.io/projected/6fdf5604-f06d-4b71-8a04-96b8e0475373-kube-api-access-v8qtv\") on node \"crc\" DevicePath \"\"" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.750268 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-4srbz/crc-debug-5ncb5"] Nov 25 22:39:16 crc kubenswrapper[4910]: E1125 22:39:16.751051 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fdf5604-f06d-4b71-8a04-96b8e0475373" containerName="container-00" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.751123 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="6fdf5604-f06d-4b71-8a04-96b8e0475373" containerName="container-00" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.751389 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fdf5604-f06d-4b71-8a04-96b8e0475373" containerName="container-00" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.752215 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-5ncb5" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.860416 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9x8bf\" (UniqueName: \"kubernetes.io/projected/982a3477-3664-45f9-9aba-9dfda13d6ea8-kube-api-access-9x8bf\") pod \"crc-debug-5ncb5\" (UID: \"982a3477-3664-45f9-9aba-9dfda13d6ea8\") " pod="openshift-must-gather-4srbz/crc-debug-5ncb5" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.860569 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/982a3477-3664-45f9-9aba-9dfda13d6ea8-host\") pod \"crc-debug-5ncb5\" (UID: \"982a3477-3664-45f9-9aba-9dfda13d6ea8\") " pod="openshift-must-gather-4srbz/crc-debug-5ncb5" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.963279 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9x8bf\" (UniqueName: \"kubernetes.io/projected/982a3477-3664-45f9-9aba-9dfda13d6ea8-kube-api-access-9x8bf\") pod \"crc-debug-5ncb5\" (UID: \"982a3477-3664-45f9-9aba-9dfda13d6ea8\") " pod="openshift-must-gather-4srbz/crc-debug-5ncb5" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.963454 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/982a3477-3664-45f9-9aba-9dfda13d6ea8-host\") pod \"crc-debug-5ncb5\" (UID: \"982a3477-3664-45f9-9aba-9dfda13d6ea8\") " pod="openshift-must-gather-4srbz/crc-debug-5ncb5" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.963642 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/982a3477-3664-45f9-9aba-9dfda13d6ea8-host\") pod \"crc-debug-5ncb5\" (UID: \"982a3477-3664-45f9-9aba-9dfda13d6ea8\") " pod="openshift-must-gather-4srbz/crc-debug-5ncb5" Nov 25 22:39:16 crc kubenswrapper[4910]: I1125 22:39:16.989440 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9x8bf\" (UniqueName: \"kubernetes.io/projected/982a3477-3664-45f9-9aba-9dfda13d6ea8-kube-api-access-9x8bf\") pod \"crc-debug-5ncb5\" (UID: \"982a3477-3664-45f9-9aba-9dfda13d6ea8\") " 
pod="openshift-must-gather-4srbz/crc-debug-5ncb5" Nov 25 22:39:17 crc kubenswrapper[4910]: I1125 22:39:17.012371 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6e66d98ab6ad8584dbec2f82c9794fa20ea9c3a588b88e50903f09eae3a3bf6" Nov 25 22:39:17 crc kubenswrapper[4910]: I1125 22:39:17.012458 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-kp7nc" Nov 25 22:39:17 crc kubenswrapper[4910]: I1125 22:39:17.074966 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-5ncb5" Nov 25 22:39:17 crc kubenswrapper[4910]: W1125 22:39:17.116754 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod982a3477_3664_45f9_9aba_9dfda13d6ea8.slice/crio-68648929af437c039665a31fd1e950239bcfa95c6517c387cce5483a37fda5a0 WatchSource:0}: Error finding container 68648929af437c039665a31fd1e950239bcfa95c6517c387cce5483a37fda5a0: Status 404 returned error can't find the container with id 68648929af437c039665a31fd1e950239bcfa95c6517c387cce5483a37fda5a0 Nov 25 22:39:17 crc kubenswrapper[4910]: I1125 22:39:17.224358 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fdf5604-f06d-4b71-8a04-96b8e0475373" path="/var/lib/kubelet/pods/6fdf5604-f06d-4b71-8a04-96b8e0475373/volumes" Nov 25 22:39:18 crc kubenswrapper[4910]: I1125 22:39:18.024207 4910 generic.go:334] "Generic (PLEG): container finished" podID="982a3477-3664-45f9-9aba-9dfda13d6ea8" containerID="9e2acfe9ab5ae5f32f2d9f77d9cc9e91c66b9e22fa1e8e30f12a27520e6a5e80" exitCode=0 Nov 25 22:39:18 crc kubenswrapper[4910]: I1125 22:39:18.024296 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4srbz/crc-debug-5ncb5" event={"ID":"982a3477-3664-45f9-9aba-9dfda13d6ea8","Type":"ContainerDied","Data":"9e2acfe9ab5ae5f32f2d9f77d9cc9e91c66b9e22fa1e8e30f12a27520e6a5e80"} Nov 25 22:39:18 crc kubenswrapper[4910]: I1125 22:39:18.024695 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4srbz/crc-debug-5ncb5" event={"ID":"982a3477-3664-45f9-9aba-9dfda13d6ea8","Type":"ContainerStarted","Data":"68648929af437c039665a31fd1e950239bcfa95c6517c387cce5483a37fda5a0"} Nov 25 22:39:18 crc kubenswrapper[4910]: I1125 22:39:18.075164 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-4srbz/crc-debug-5ncb5"] Nov 25 22:39:18 crc kubenswrapper[4910]: I1125 22:39:18.098167 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-4srbz/crc-debug-5ncb5"] Nov 25 22:39:19 crc kubenswrapper[4910]: I1125 22:39:19.151073 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-5ncb5" Nov 25 22:39:19 crc kubenswrapper[4910]: I1125 22:39:19.339194 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9x8bf\" (UniqueName: \"kubernetes.io/projected/982a3477-3664-45f9-9aba-9dfda13d6ea8-kube-api-access-9x8bf\") pod \"982a3477-3664-45f9-9aba-9dfda13d6ea8\" (UID: \"982a3477-3664-45f9-9aba-9dfda13d6ea8\") " Nov 25 22:39:19 crc kubenswrapper[4910]: I1125 22:39:19.339423 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/982a3477-3664-45f9-9aba-9dfda13d6ea8-host\") pod \"982a3477-3664-45f9-9aba-9dfda13d6ea8\" (UID: \"982a3477-3664-45f9-9aba-9dfda13d6ea8\") " Nov 25 22:39:19 crc kubenswrapper[4910]: I1125 22:39:19.339531 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/982a3477-3664-45f9-9aba-9dfda13d6ea8-host" (OuterVolumeSpecName: "host") pod "982a3477-3664-45f9-9aba-9dfda13d6ea8" (UID: "982a3477-3664-45f9-9aba-9dfda13d6ea8"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 25 22:39:19 crc kubenswrapper[4910]: I1125 22:39:19.341534 4910 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/982a3477-3664-45f9-9aba-9dfda13d6ea8-host\") on node \"crc\" DevicePath \"\"" Nov 25 22:39:19 crc kubenswrapper[4910]: I1125 22:39:19.358768 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/982a3477-3664-45f9-9aba-9dfda13d6ea8-kube-api-access-9x8bf" (OuterVolumeSpecName: "kube-api-access-9x8bf") pod "982a3477-3664-45f9-9aba-9dfda13d6ea8" (UID: "982a3477-3664-45f9-9aba-9dfda13d6ea8"). InnerVolumeSpecName "kube-api-access-9x8bf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:39:19 crc kubenswrapper[4910]: I1125 22:39:19.445175 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9x8bf\" (UniqueName: \"kubernetes.io/projected/982a3477-3664-45f9-9aba-9dfda13d6ea8-kube-api-access-9x8bf\") on node \"crc\" DevicePath \"\"" Nov 25 22:39:20 crc kubenswrapper[4910]: I1125 22:39:20.050563 4910 scope.go:117] "RemoveContainer" containerID="9e2acfe9ab5ae5f32f2d9f77d9cc9e91c66b9e22fa1e8e30f12a27520e6a5e80" Nov 25 22:39:20 crc kubenswrapper[4910]: I1125 22:39:20.050705 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-4srbz/crc-debug-5ncb5" Nov 25 22:39:21 crc kubenswrapper[4910]: I1125 22:39:21.219817 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="982a3477-3664-45f9-9aba-9dfda13d6ea8" path="/var/lib/kubelet/pods/982a3477-3664-45f9-9aba-9dfda13d6ea8/volumes" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.186843 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-97fj7"] Nov 25 22:39:26 crc kubenswrapper[4910]: E1125 22:39:26.188486 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="982a3477-3664-45f9-9aba-9dfda13d6ea8" containerName="container-00" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.188506 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="982a3477-3664-45f9-9aba-9dfda13d6ea8" containerName="container-00" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.188810 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="982a3477-3664-45f9-9aba-9dfda13d6ea8" containerName="container-00" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.191192 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.218566 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-catalog-content\") pod \"redhat-operators-97fj7\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.218677 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-utilities\") pod \"redhat-operators-97fj7\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.218774 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dw4m\" (UniqueName: \"kubernetes.io/projected/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-kube-api-access-2dw4m\") pod \"redhat-operators-97fj7\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.224099 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-97fj7"] Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.321043 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-utilities\") pod \"redhat-operators-97fj7\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.321219 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2dw4m\" (UniqueName: \"kubernetes.io/projected/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-kube-api-access-2dw4m\") pod \"redhat-operators-97fj7\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.321343 4910 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-catalog-content\") pod \"redhat-operators-97fj7\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.321929 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-catalog-content\") pod \"redhat-operators-97fj7\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.322464 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-utilities\") pod \"redhat-operators-97fj7\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.347981 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dw4m\" (UniqueName: \"kubernetes.io/projected/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-kube-api-access-2dw4m\") pod \"redhat-operators-97fj7\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:26 crc kubenswrapper[4910]: I1125 22:39:26.528348 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:27 crc kubenswrapper[4910]: I1125 22:39:27.028338 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-97fj7"] Nov 25 22:39:27 crc kubenswrapper[4910]: I1125 22:39:27.166926 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97fj7" event={"ID":"f0c5adee-b0d4-4b20-807b-afb07f6be0cc","Type":"ContainerStarted","Data":"4a8bd7bb77d47eb7a2e31095c7247269c25c828f03b7a68bb9612d78a61f32b3"} Nov 25 22:39:28 crc kubenswrapper[4910]: I1125 22:39:28.178504 4910 generic.go:334] "Generic (PLEG): container finished" podID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerID="2b4bb84d2bb3fb658c0028ef0a85a3a519abc8709f01fd899c4633268f560ff1" exitCode=0 Nov 25 22:39:28 crc kubenswrapper[4910]: I1125 22:39:28.178588 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97fj7" event={"ID":"f0c5adee-b0d4-4b20-807b-afb07f6be0cc","Type":"ContainerDied","Data":"2b4bb84d2bb3fb658c0028ef0a85a3a519abc8709f01fd899c4633268f560ff1"} Nov 25 22:39:28 crc kubenswrapper[4910]: I1125 22:39:28.182306 4910 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 25 22:39:29 crc kubenswrapper[4910]: I1125 22:39:29.221233 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97fj7" event={"ID":"f0c5adee-b0d4-4b20-807b-afb07f6be0cc","Type":"ContainerStarted","Data":"ddf454929a35cf1cf53b87844a733f4da2a97fe31a1286a3af5635483d6d5da8"} Nov 25 22:39:31 crc kubenswrapper[4910]: I1125 22:39:31.224231 4910 generic.go:334] "Generic (PLEG): container finished" podID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerID="ddf454929a35cf1cf53b87844a733f4da2a97fe31a1286a3af5635483d6d5da8" exitCode=0 Nov 25 22:39:31 crc kubenswrapper[4910]: I1125 22:39:31.224372 4910 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/redhat-operators-97fj7" event={"ID":"f0c5adee-b0d4-4b20-807b-afb07f6be0cc","Type":"ContainerDied","Data":"ddf454929a35cf1cf53b87844a733f4da2a97fe31a1286a3af5635483d6d5da8"} Nov 25 22:39:32 crc kubenswrapper[4910]: I1125 22:39:32.243217 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97fj7" event={"ID":"f0c5adee-b0d4-4b20-807b-afb07f6be0cc","Type":"ContainerStarted","Data":"a90d447b6653fb2fd2aba90c3791efd58832d6ddaf62fd91f91d0f8761c582b1"} Nov 25 22:39:32 crc kubenswrapper[4910]: I1125 22:39:32.272651 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-97fj7" podStartSLOduration=2.810643012 podStartE2EDuration="6.272632224s" podCreationTimestamp="2025-11-25 22:39:26 +0000 UTC" firstStartedPulling="2025-11-25 22:39:28.182014971 +0000 UTC m=+4123.644491293" lastFinishedPulling="2025-11-25 22:39:31.644004153 +0000 UTC m=+4127.106480505" observedRunningTime="2025-11-25 22:39:32.271472863 +0000 UTC m=+4127.733949195" watchObservedRunningTime="2025-11-25 22:39:32.272632224 +0000 UTC m=+4127.735108546" Nov 25 22:39:36 crc kubenswrapper[4910]: I1125 22:39:36.529092 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:36 crc kubenswrapper[4910]: I1125 22:39:36.529788 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:37 crc kubenswrapper[4910]: I1125 22:39:37.591027 4910 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-97fj7" podUID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerName="registry-server" probeResult="failure" output=< Nov 25 22:39:37 crc kubenswrapper[4910]: timeout: failed to connect service ":50051" within 1s Nov 25 22:39:37 crc kubenswrapper[4910]: > Nov 25 22:39:46 crc kubenswrapper[4910]: I1125 22:39:46.624233 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:46 crc kubenswrapper[4910]: I1125 22:39:46.714058 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:46 crc kubenswrapper[4910]: I1125 22:39:46.881026 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-97fj7"] Nov 25 22:39:48 crc kubenswrapper[4910]: I1125 22:39:48.418759 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-97fj7" podUID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerName="registry-server" containerID="cri-o://a90d447b6653fb2fd2aba90c3791efd58832d6ddaf62fd91f91d0f8761c582b1" gracePeriod=2 Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.435001 4910 generic.go:334] "Generic (PLEG): container finished" podID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerID="a90d447b6653fb2fd2aba90c3791efd58832d6ddaf62fd91f91d0f8761c582b1" exitCode=0 Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.435120 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97fj7" event={"ID":"f0c5adee-b0d4-4b20-807b-afb07f6be0cc","Type":"ContainerDied","Data":"a90d447b6653fb2fd2aba90c3791efd58832d6ddaf62fd91f91d0f8761c582b1"} Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.620097 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.729550 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-utilities\") pod \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.729771 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-catalog-content\") pod \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.729891 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2dw4m\" (UniqueName: \"kubernetes.io/projected/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-kube-api-access-2dw4m\") pod \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\" (UID: \"f0c5adee-b0d4-4b20-807b-afb07f6be0cc\") " Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.730939 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-utilities" (OuterVolumeSpecName: "utilities") pod "f0c5adee-b0d4-4b20-807b-afb07f6be0cc" (UID: "f0c5adee-b0d4-4b20-807b-afb07f6be0cc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.738079 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-kube-api-access-2dw4m" (OuterVolumeSpecName: "kube-api-access-2dw4m") pod "f0c5adee-b0d4-4b20-807b-afb07f6be0cc" (UID: "f0c5adee-b0d4-4b20-807b-afb07f6be0cc"). InnerVolumeSpecName "kube-api-access-2dw4m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.834324 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2dw4m\" (UniqueName: \"kubernetes.io/projected/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-kube-api-access-2dw4m\") on node \"crc\" DevicePath \"\"" Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.834377 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.844671 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f0c5adee-b0d4-4b20-807b-afb07f6be0cc" (UID: "f0c5adee-b0d4-4b20-807b-afb07f6be0cc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:39:49 crc kubenswrapper[4910]: I1125 22:39:49.936024 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f0c5adee-b0d4-4b20-807b-afb07f6be0cc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:39:50 crc kubenswrapper[4910]: I1125 22:39:50.492163 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-97fj7" event={"ID":"f0c5adee-b0d4-4b20-807b-afb07f6be0cc","Type":"ContainerDied","Data":"4a8bd7bb77d47eb7a2e31095c7247269c25c828f03b7a68bb9612d78a61f32b3"} Nov 25 22:39:50 crc kubenswrapper[4910]: I1125 22:39:50.492239 4910 scope.go:117] "RemoveContainer" containerID="a90d447b6653fb2fd2aba90c3791efd58832d6ddaf62fd91f91d0f8761c582b1" Nov 25 22:39:50 crc kubenswrapper[4910]: I1125 22:39:50.492437 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-97fj7" Nov 25 22:39:50 crc kubenswrapper[4910]: I1125 22:39:50.531376 4910 scope.go:117] "RemoveContainer" containerID="ddf454929a35cf1cf53b87844a733f4da2a97fe31a1286a3af5635483d6d5da8" Nov 25 22:39:50 crc kubenswrapper[4910]: I1125 22:39:50.552568 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-97fj7"] Nov 25 22:39:50 crc kubenswrapper[4910]: I1125 22:39:50.563216 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-97fj7"] Nov 25 22:39:50 crc kubenswrapper[4910]: I1125 22:39:50.602453 4910 scope.go:117] "RemoveContainer" containerID="2b4bb84d2bb3fb658c0028ef0a85a3a519abc8709f01fd899c4633268f560ff1" Nov 25 22:39:51 crc kubenswrapper[4910]: I1125 22:39:51.216805 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" path="/var/lib/kubelet/pods/f0c5adee-b0d4-4b20-807b-afb07f6be0cc/volumes" Nov 25 22:39:51 crc kubenswrapper[4910]: I1125 22:39:51.441481 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7ccccf649d-9sm5c_fe39bdcb-17f5-455e-89af-d161d0d651fc/barbican-api/0.log" Nov 25 22:39:51 crc kubenswrapper[4910]: I1125 22:39:51.571477 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-7ccccf649d-9sm5c_fe39bdcb-17f5-455e-89af-d161d0d651fc/barbican-api-log/0.log" Nov 25 22:39:51 crc kubenswrapper[4910]: I1125 22:39:51.646498 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-865b64f5bb-fdgzg_7193b97a-2be1-4f8f-9e84-abb09908f78c/barbican-keystone-listener/0.log" Nov 25 22:39:51 crc kubenswrapper[4910]: I1125 22:39:51.708665 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-865b64f5bb-fdgzg_7193b97a-2be1-4f8f-9e84-abb09908f78c/barbican-keystone-listener-log/0.log" Nov 25 22:39:51 crc kubenswrapper[4910]: I1125 22:39:51.818989 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7fdcb7f4c9-g5zfq_463bc99a-ad40-4df5-9b99-d10d0af67cea/barbican-worker/0.log" Nov 25 22:39:51 crc kubenswrapper[4910]: I1125 22:39:51.873314 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7fdcb7f4c9-g5zfq_463bc99a-ad40-4df5-9b99-d10d0af67cea/barbican-worker-log/0.log" Nov 25 22:39:52 crc kubenswrapper[4910]: I1125 22:39:52.045163 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-6qggh_6fbaf31f-bfe9-4f0a-a064-75d015480249/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:39:52 crc kubenswrapper[4910]: I1125 22:39:52.149586 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f8fbe2b4-66f6-440d-8cdd-04534f6069ad/ceilometer-central-agent/0.log" Nov 25 22:39:52 crc kubenswrapper[4910]: I1125 22:39:52.273180 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f8fbe2b4-66f6-440d-8cdd-04534f6069ad/proxy-httpd/0.log" Nov 25 22:39:52 crc kubenswrapper[4910]: I1125 22:39:52.280639 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f8fbe2b4-66f6-440d-8cdd-04534f6069ad/ceilometer-notification-agent/0.log" Nov 25 22:39:52 crc kubenswrapper[4910]: I1125 22:39:52.383817 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_f8fbe2b4-66f6-440d-8cdd-04534f6069ad/sg-core/0.log" Nov 25 22:39:52 crc kubenswrapper[4910]: I1125 22:39:52.564088 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_24fe78bf-5d43-4896-b226-8d33a8856a13/cinder-api-log/0.log" Nov 25 22:39:52 crc kubenswrapper[4910]: I1125 22:39:52.571458 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_24fe78bf-5d43-4896-b226-8d33a8856a13/cinder-api/0.log" Nov 25 22:39:52 crc kubenswrapper[4910]: I1125 22:39:52.691828 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_f9dafbee-be84-4df3-a1d1-6ff36015ec46/cinder-scheduler/0.log" Nov 25 22:39:52 crc kubenswrapper[4910]: I1125 22:39:52.797327 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_f9dafbee-be84-4df3-a1d1-6ff36015ec46/probe/0.log" Nov 25 22:39:52 crc kubenswrapper[4910]: I1125 22:39:52.887694 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-j5fcz_dc112a56-de9c-47b3-8ca9-c0225469f85c/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:39:53 crc kubenswrapper[4910]: I1125 22:39:53.041300 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-wfq4j_6552e880-8d31-43fc-9fee-d2e33c2ca987/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:39:53 crc kubenswrapper[4910]: I1125 22:39:53.133524 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-9qdqp_fe3aae4c-2f2b-42be-b179-105323fa0957/init/0.log" Nov 25 22:39:53 crc kubenswrapper[4910]: I1125 22:39:53.347750 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-9qdqp_fe3aae4c-2f2b-42be-b179-105323fa0957/init/0.log" Nov 25 22:39:53 crc kubenswrapper[4910]: I1125 22:39:53.387702 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-8c6f6df99-9qdqp_fe3aae4c-2f2b-42be-b179-105323fa0957/dnsmasq-dns/0.log" Nov 25 22:39:53 crc kubenswrapper[4910]: I1125 22:39:53.402143 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-24hvs_a6f639c3-729d-4c6a-9e97-afb151569af5/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:39:53 crc kubenswrapper[4910]: I1125 22:39:53.597635 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_glance-default-external-api-0_5b3e95ca-7b13-4baf-98f1-465aa3b31a2c/glance-httpd/0.log" Nov 25 22:39:53 crc kubenswrapper[4910]: I1125 22:39:53.666425 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_5b3e95ca-7b13-4baf-98f1-465aa3b31a2c/glance-log/0.log" Nov 25 22:39:53 crc kubenswrapper[4910]: I1125 22:39:53.823792 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_b9542bd3-e5b7-44e2-84bb-11b34d1fc44b/glance-log/0.log" Nov 25 22:39:53 crc kubenswrapper[4910]: I1125 22:39:53.891236 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_b9542bd3-e5b7-44e2-84bb-11b34d1fc44b/glance-httpd/0.log" Nov 25 22:39:54 crc kubenswrapper[4910]: I1125 22:39:54.068610 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-657976db8d-swkbt_7941e190-b648-4b11-946b-dddaa1bc98d9/horizon/0.log" Nov 25 22:39:54 crc kubenswrapper[4910]: I1125 22:39:54.295153 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-z2qjv_ef782e7e-3e6a-41ed-a9b1-343be0faecc3/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:39:54 crc kubenswrapper[4910]: I1125 22:39:54.449956 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-sp4g4_941bb6aa-1438-4ed4-8ed3-3e834a784a79/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:39:54 crc kubenswrapper[4910]: I1125 22:39:54.515480 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-657976db8d-swkbt_7941e190-b648-4b11-946b-dddaa1bc98d9/horizon-log/0.log" Nov 25 22:39:54 crc kubenswrapper[4910]: I1125 22:39:54.620127 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29401801-dfmbk_d759f7d3-5701-4d72-9df3-2509819d80f2/keystone-cron/0.log" Nov 25 22:39:54 crc kubenswrapper[4910]: I1125 22:39:54.889180 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_eaec4e2c-bb9a-4c1f-80d5-c93dce82233e/kube-state-metrics/0.log" Nov 25 22:39:54 crc kubenswrapper[4910]: I1125 22:39:54.896314 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-f7657d4c8-n2wbh_1fc29606-ff34-4170-859a-8357838d9b65/keystone-api/0.log" Nov 25 22:39:54 crc kubenswrapper[4910]: I1125 22:39:54.972036 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-h78fk_1132a133-2fdf-4a87-b132-d1f1c0a26c76/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:39:55 crc kubenswrapper[4910]: I1125 22:39:55.317119 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-667bcb4bc9-bl288_7935f6eb-171e-43a8-9f6c-6bf62769ade6/neutron-api/0.log" Nov 25 22:39:55 crc kubenswrapper[4910]: I1125 22:39:55.356149 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-667bcb4bc9-bl288_7935f6eb-171e-43a8-9f6c-6bf62769ade6/neutron-httpd/0.log" Nov 25 22:39:55 crc kubenswrapper[4910]: I1125 22:39:55.481009 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-2w5xj_ed6d4c0f-684e-4174-ad4c-5f034025d52a/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:39:56 crc kubenswrapper[4910]: I1125 22:39:56.081294 4910 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_nova-api-0_caf92f8f-f8b6-4214-8b76-13cfe6bafd4a/nova-api-log/0.log" Nov 25 22:39:56 crc kubenswrapper[4910]: I1125 22:39:56.285107 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_533ec0e8-93b1-4468-8b7f-72071aa8be27/nova-cell0-conductor-conductor/0.log" Nov 25 22:39:56 crc kubenswrapper[4910]: I1125 22:39:56.566587 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_cc5eb389-8176-4989-8f45-a7a9631b286b/nova-cell1-conductor-conductor/0.log" Nov 25 22:39:56 crc kubenswrapper[4910]: I1125 22:39:56.634470 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_caf92f8f-f8b6-4214-8b76-13cfe6bafd4a/nova-api-api/0.log" Nov 25 22:39:56 crc kubenswrapper[4910]: I1125 22:39:56.717164 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_af1314f5-7ef2-46dd-b56d-3320375af199/nova-cell1-novncproxy-novncproxy/0.log" Nov 25 22:39:56 crc kubenswrapper[4910]: I1125 22:39:56.860194 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-sv2kk_0a190739-9d08-41b3-a45d-42d0b636ccad/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:39:56 crc kubenswrapper[4910]: I1125 22:39:56.969281 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_2b80da77-dd73-4886-bcd1-88fb1c484af1/nova-metadata-log/0.log" Nov 25 22:39:57 crc kubenswrapper[4910]: I1125 22:39:57.343775 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_62283554-0498-4bac-b223-8d3c6d21b614/mysql-bootstrap/0.log" Nov 25 22:39:57 crc kubenswrapper[4910]: I1125 22:39:57.486207 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_8dba0933-64a2-4286-baee-149ebff5c09d/nova-scheduler-scheduler/0.log" Nov 25 22:39:57 crc kubenswrapper[4910]: I1125 22:39:57.630567 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_62283554-0498-4bac-b223-8d3c6d21b614/mysql-bootstrap/0.log" Nov 25 22:39:57 crc kubenswrapper[4910]: I1125 22:39:57.655959 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_62283554-0498-4bac-b223-8d3c6d21b614/galera/0.log" Nov 25 22:39:57 crc kubenswrapper[4910]: I1125 22:39:57.878443 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc/mysql-bootstrap/0.log" Nov 25 22:39:58 crc kubenswrapper[4910]: I1125 22:39:58.112056 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc/mysql-bootstrap/0.log" Nov 25 22:39:58 crc kubenswrapper[4910]: I1125 22:39:58.236546 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_ecbb2a7f-c6f7-48f9-ab3a-b0cda9b445dc/galera/0.log" Nov 25 22:39:58 crc kubenswrapper[4910]: I1125 22:39:58.391976 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_e36f2312-81e7-4b57-9131-695681724f08/openstackclient/0.log" Nov 25 22:39:58 crc kubenswrapper[4910]: I1125 22:39:58.519172 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-554dc_5d3afe23-a5d2-4f9c-bdaa-f80020ef6226/ovn-controller/0.log" Nov 25 22:39:58 crc kubenswrapper[4910]: I1125 22:39:58.699255 4910 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_nova-metadata-0_2b80da77-dd73-4886-bcd1-88fb1c484af1/nova-metadata-metadata/0.log" Nov 25 22:39:58 crc kubenswrapper[4910]: I1125 22:39:58.959023 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-lz25t_d73b450a-c8fd-47c7-918c-273ae5d10b8a/openstack-network-exporter/0.log" Nov 25 22:39:59 crc kubenswrapper[4910]: I1125 22:39:59.135699 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dbkwd_d271e423-f378-4368-b055-d89cea058d38/ovsdb-server-init/0.log" Nov 25 22:39:59 crc kubenswrapper[4910]: I1125 22:39:59.419037 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dbkwd_d271e423-f378-4368-b055-d89cea058d38/ovsdb-server-init/0.log" Nov 25 22:39:59 crc kubenswrapper[4910]: I1125 22:39:59.427559 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dbkwd_d271e423-f378-4368-b055-d89cea058d38/ovs-vswitchd/0.log" Nov 25 22:39:59 crc kubenswrapper[4910]: I1125 22:39:59.464073 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-dbkwd_d271e423-f378-4368-b055-d89cea058d38/ovsdb-server/0.log" Nov 25 22:40:00 crc kubenswrapper[4910]: I1125 22:40:00.177351 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-p7pqv_0ab610e6-8ab3-4c4c-83fa-5ce52795f545/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:40:00 crc kubenswrapper[4910]: I1125 22:40:00.249385 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_944fb5f5-a2bc-4328-bbec-203fbfb6cd20/ovn-northd/0.log" Nov 25 22:40:00 crc kubenswrapper[4910]: I1125 22:40:00.290513 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_944fb5f5-a2bc-4328-bbec-203fbfb6cd20/openstack-network-exporter/0.log" Nov 25 22:40:00 crc kubenswrapper[4910]: I1125 22:40:00.512431 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d4285011-1eac-4f3c-af27-c6c6ad03d8de/openstack-network-exporter/0.log" Nov 25 22:40:00 crc kubenswrapper[4910]: I1125 22:40:00.600439 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_d4285011-1eac-4f3c-af27-c6c6ad03d8de/ovsdbserver-nb/0.log" Nov 25 22:40:00 crc kubenswrapper[4910]: I1125 22:40:00.737611 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_d7e886f1-04bd-4061-9a6c-18a20a1d7cbe/openstack-network-exporter/0.log" Nov 25 22:40:00 crc kubenswrapper[4910]: I1125 22:40:00.903686 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_d7e886f1-04bd-4061-9a6c-18a20a1d7cbe/ovsdbserver-sb/0.log" Nov 25 22:40:01 crc kubenswrapper[4910]: I1125 22:40:01.012730 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-64ff96875d-p4n97_d7f10efc-4222-4871-b684-dc482fd27b01/placement-api/0.log" Nov 25 22:40:01 crc kubenswrapper[4910]: I1125 22:40:01.112198 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-64ff96875d-p4n97_d7f10efc-4222-4871-b684-dc482fd27b01/placement-log/0.log" Nov 25 22:40:01 crc kubenswrapper[4910]: I1125 22:40:01.290022 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bc2bbda0-2d3e-4794-bc13-21bca025c6fe/setup-container/0.log" Nov 25 22:40:01 crc kubenswrapper[4910]: I1125 22:40:01.776117 4910 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bc2bbda0-2d3e-4794-bc13-21bca025c6fe/setup-container/0.log" Nov 25 22:40:01 crc kubenswrapper[4910]: I1125 22:40:01.851368 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9d06ec4c-6e1e-4fc9-9e41-59857b4494fd/setup-container/0.log" Nov 25 22:40:01 crc kubenswrapper[4910]: I1125 22:40:01.885489 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_bc2bbda0-2d3e-4794-bc13-21bca025c6fe/rabbitmq/0.log" Nov 25 22:40:02 crc kubenswrapper[4910]: I1125 22:40:02.561956 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9d06ec4c-6e1e-4fc9-9e41-59857b4494fd/setup-container/0.log" Nov 25 22:40:02 crc kubenswrapper[4910]: I1125 22:40:02.711291 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_9d06ec4c-6e1e-4fc9-9e41-59857b4494fd/rabbitmq/0.log" Nov 25 22:40:02 crc kubenswrapper[4910]: I1125 22:40:02.799196 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-vhjzp_c552b066-9a2f-46d0-9865-adaa8c454811/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:40:03 crc kubenswrapper[4910]: I1125 22:40:03.060947 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-qbdmv_9cd37c71-fc60-4099-a183-6f9e8a918e1e/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:40:03 crc kubenswrapper[4910]: I1125 22:40:03.062715 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-4z9c2_444f986e-2346-419a-a78e-584196602880/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:40:03 crc kubenswrapper[4910]: I1125 22:40:03.630556 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-r9mbm_475e4e94-21eb-40fb-8d3d-b5359cc77a88/ssh-known-hosts-edpm-deployment/0.log" Nov 25 22:40:03 crc kubenswrapper[4910]: I1125 22:40:03.663364 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-t7clg_77fc796b-aaee-4dad-a82d-464aaf60ab47/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:40:03 crc kubenswrapper[4910]: I1125 22:40:03.992059 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6575886cb7-hv9qm_8028bd01-f5f2-4c20-9f51-c6a7e06571fd/proxy-server/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.038510 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-v7clx_5050ee25-88de-4888-ba01-fc11c71df0a1/swift-ring-rebalance/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.201170 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/account-auditor/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.208315 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-6575886cb7-hv9qm_8028bd01-f5f2-4c20-9f51-c6a7e06571fd/proxy-httpd/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.240681 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/account-reaper/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.446063 4910 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/account-server/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.516860 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/container-auditor/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.539583 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/account-replicator/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.616385 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/container-replicator/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.727902 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/container-updater/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.795821 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/container-server/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.844928 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/object-auditor/0.log" Nov 25 22:40:04 crc kubenswrapper[4910]: I1125 22:40:04.900009 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/object-expirer/0.log" Nov 25 22:40:05 crc kubenswrapper[4910]: I1125 22:40:05.068841 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/object-server/0.log" Nov 25 22:40:05 crc kubenswrapper[4910]: I1125 22:40:05.078806 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/rsync/0.log" Nov 25 22:40:05 crc kubenswrapper[4910]: I1125 22:40:05.088891 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/object-replicator/0.log" Nov 25 22:40:05 crc kubenswrapper[4910]: I1125 22:40:05.171311 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/object-updater/0.log" Nov 25 22:40:05 crc kubenswrapper[4910]: I1125 22:40:05.355613 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_df4c228a-b3ae-4de6-bd0b-a761692c4476/swift-recon-cron/0.log" Nov 25 22:40:05 crc kubenswrapper[4910]: I1125 22:40:05.421984 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-vvsnw_66309eee-ce32-4108-82f9-e96dbc03dc45/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:40:05 crc kubenswrapper[4910]: I1125 22:40:05.888433 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_c1d3df8e-e3e1-4065-8736-979a4abaec2c/tempest-tests-tempest-tests-runner/0.log" Nov 25 22:40:05 crc kubenswrapper[4910]: I1125 22:40:05.976800 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_2fc860e3-9496-49ee-8083-468eb806013d/test-operator-logs-container/0.log" Nov 25 22:40:06 crc kubenswrapper[4910]: I1125 22:40:06.134974 4910 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-9x7tj_d8581103-5144-4384-8d68-9160c64f6233/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 25 22:40:16 crc kubenswrapper[4910]: I1125 22:40:16.361588 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_c811f98e-8a72-406b-b0c3-35a7102dd46e/memcached/0.log" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.563044 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xtn5v"] Nov 25 22:40:17 crc kubenswrapper[4910]: E1125 22:40:17.564994 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerName="registry-server" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.565065 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerName="registry-server" Nov 25 22:40:17 crc kubenswrapper[4910]: E1125 22:40:17.565149 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerName="extract-utilities" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.565199 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerName="extract-utilities" Nov 25 22:40:17 crc kubenswrapper[4910]: E1125 22:40:17.565320 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerName="extract-content" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.565453 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerName="extract-content" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.565720 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0c5adee-b0d4-4b20-807b-afb07f6be0cc" containerName="registry-server" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.567403 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.581651 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xtn5v"] Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.593646 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-utilities\") pod \"community-operators-xtn5v\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.594381 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-catalog-content\") pod \"community-operators-xtn5v\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.594519 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m78p6\" (UniqueName: \"kubernetes.io/projected/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-kube-api-access-m78p6\") pod \"community-operators-xtn5v\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.696759 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-catalog-content\") pod \"community-operators-xtn5v\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.696831 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m78p6\" (UniqueName: \"kubernetes.io/projected/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-kube-api-access-m78p6\") pod \"community-operators-xtn5v\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.696895 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-utilities\") pod \"community-operators-xtn5v\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.697509 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-catalog-content\") pod \"community-operators-xtn5v\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.697552 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-utilities\") pod \"community-operators-xtn5v\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.720213 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-m78p6\" (UniqueName: \"kubernetes.io/projected/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-kube-api-access-m78p6\") pod \"community-operators-xtn5v\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:17 crc kubenswrapper[4910]: I1125 22:40:17.887853 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:18 crc kubenswrapper[4910]: I1125 22:40:18.512083 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xtn5v"] Nov 25 22:40:18 crc kubenswrapper[4910]: I1125 22:40:18.840569 4910 generic.go:334] "Generic (PLEG): container finished" podID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" containerID="fe70523b6d1d4d49a89587a0088790bf419f19aa09ef1acc13dbd12454880644" exitCode=0 Nov 25 22:40:18 crc kubenswrapper[4910]: I1125 22:40:18.840630 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtn5v" event={"ID":"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7","Type":"ContainerDied","Data":"fe70523b6d1d4d49a89587a0088790bf419f19aa09ef1acc13dbd12454880644"} Nov 25 22:40:18 crc kubenswrapper[4910]: I1125 22:40:18.840666 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtn5v" event={"ID":"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7","Type":"ContainerStarted","Data":"18ced708373c746f1757fd0ffdfc913ea261713742f24f4b1724539372fa201f"} Nov 25 22:40:20 crc kubenswrapper[4910]: I1125 22:40:20.888771 4910 generic.go:334] "Generic (PLEG): container finished" podID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" containerID="81f9afbf1d1d6872da19ade1289da16ec8af4219b1b9a9da72121bdbf1d9399a" exitCode=0 Nov 25 22:40:20 crc kubenswrapper[4910]: I1125 22:40:20.888853 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtn5v" event={"ID":"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7","Type":"ContainerDied","Data":"81f9afbf1d1d6872da19ade1289da16ec8af4219b1b9a9da72121bdbf1d9399a"} Nov 25 22:40:21 crc kubenswrapper[4910]: I1125 22:40:21.905447 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtn5v" event={"ID":"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7","Type":"ContainerStarted","Data":"e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf"} Nov 25 22:40:21 crc kubenswrapper[4910]: I1125 22:40:21.933391 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xtn5v" podStartSLOduration=2.474681444 podStartE2EDuration="4.933365102s" podCreationTimestamp="2025-11-25 22:40:17 +0000 UTC" firstStartedPulling="2025-11-25 22:40:18.843226968 +0000 UTC m=+4174.305703290" lastFinishedPulling="2025-11-25 22:40:21.301910626 +0000 UTC m=+4176.764386948" observedRunningTime="2025-11-25 22:40:21.925967525 +0000 UTC m=+4177.388443847" watchObservedRunningTime="2025-11-25 22:40:21.933365102 +0000 UTC m=+4177.395841424" Nov 25 22:40:23 crc kubenswrapper[4910]: I1125 22:40:23.099034 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 25 22:40:23 crc kubenswrapper[4910]: I1125 22:40:23.099417 4910 prober.go:107] "Probe 
failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 25 22:40:27 crc kubenswrapper[4910]: I1125 22:40:27.888498 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:27 crc kubenswrapper[4910]: I1125 22:40:27.889463 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:27 crc kubenswrapper[4910]: I1125 22:40:27.965783 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:28 crc kubenswrapper[4910]: I1125 22:40:28.061417 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:28 crc kubenswrapper[4910]: I1125 22:40:28.216488 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xtn5v"] Nov 25 22:40:30 crc kubenswrapper[4910]: I1125 22:40:30.008638 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xtn5v" podUID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" containerName="registry-server" containerID="cri-o://e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf" gracePeriod=2 Nov 25 22:40:30 crc kubenswrapper[4910]: I1125 22:40:30.497379 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:30 crc kubenswrapper[4910]: I1125 22:40:30.633730 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m78p6\" (UniqueName: \"kubernetes.io/projected/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-kube-api-access-m78p6\") pod \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " Nov 25 22:40:30 crc kubenswrapper[4910]: I1125 22:40:30.633825 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-utilities\") pod \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " Nov 25 22:40:30 crc kubenswrapper[4910]: I1125 22:40:30.633969 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-catalog-content\") pod \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\" (UID: \"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7\") " Nov 25 22:40:30 crc kubenswrapper[4910]: I1125 22:40:30.641516 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-utilities" (OuterVolumeSpecName: "utilities") pod "213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" (UID: "213a6afd-d9b6-4977-b1ab-40ad59f6e4a7"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:40:30 crc kubenswrapper[4910]: I1125 22:40:30.658825 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-kube-api-access-m78p6" (OuterVolumeSpecName: "kube-api-access-m78p6") pod "213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" (UID: "213a6afd-d9b6-4977-b1ab-40ad59f6e4a7"). InnerVolumeSpecName "kube-api-access-m78p6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:40:30 crc kubenswrapper[4910]: I1125 22:40:30.736458 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m78p6\" (UniqueName: \"kubernetes.io/projected/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-kube-api-access-m78p6\") on node \"crc\" DevicePath \"\"" Nov 25 22:40:30 crc kubenswrapper[4910]: I1125 22:40:30.736508 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:40:30 crc kubenswrapper[4910]: I1125 22:40:30.813843 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" (UID: "213a6afd-d9b6-4977-b1ab-40ad59f6e4a7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:40:30 crc kubenswrapper[4910]: I1125 22:40:30.838585 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.026863 4910 generic.go:334] "Generic (PLEG): container finished" podID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" containerID="e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf" exitCode=0 Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.026953 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtn5v" event={"ID":"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7","Type":"ContainerDied","Data":"e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf"} Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.026980 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xtn5v" Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.027005 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xtn5v" event={"ID":"213a6afd-d9b6-4977-b1ab-40ad59f6e4a7","Type":"ContainerDied","Data":"18ced708373c746f1757fd0ffdfc913ea261713742f24f4b1724539372fa201f"} Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.027045 4910 scope.go:117] "RemoveContainer" containerID="e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf" Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.075063 4910 scope.go:117] "RemoveContainer" containerID="81f9afbf1d1d6872da19ade1289da16ec8af4219b1b9a9da72121bdbf1d9399a" Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.087551 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xtn5v"] Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.102101 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xtn5v"] Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.102945 4910 scope.go:117] "RemoveContainer" containerID="fe70523b6d1d4d49a89587a0088790bf419f19aa09ef1acc13dbd12454880644" Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.149505 4910 scope.go:117] "RemoveContainer" containerID="e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf" Nov 25 22:40:31 crc kubenswrapper[4910]: E1125 22:40:31.150795 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf\": container with ID starting with e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf not found: ID does not exist" containerID="e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf" Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.150853 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf"} err="failed to get container status \"e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf\": rpc error: code = NotFound desc = could not find container \"e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf\": container with ID starting with e4a8ddce62250c52b2fb45919945acd36236047e668a8962ea3cdc1a0dfde6cf not found: ID does not exist" Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.150892 4910 scope.go:117] "RemoveContainer" containerID="81f9afbf1d1d6872da19ade1289da16ec8af4219b1b9a9da72121bdbf1d9399a" Nov 25 22:40:31 crc kubenswrapper[4910]: E1125 22:40:31.151526 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81f9afbf1d1d6872da19ade1289da16ec8af4219b1b9a9da72121bdbf1d9399a\": container with ID starting with 81f9afbf1d1d6872da19ade1289da16ec8af4219b1b9a9da72121bdbf1d9399a not found: ID does not exist" containerID="81f9afbf1d1d6872da19ade1289da16ec8af4219b1b9a9da72121bdbf1d9399a" Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.151572 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81f9afbf1d1d6872da19ade1289da16ec8af4219b1b9a9da72121bdbf1d9399a"} err="failed to get container status \"81f9afbf1d1d6872da19ade1289da16ec8af4219b1b9a9da72121bdbf1d9399a\": rpc error: code = NotFound desc = could not find 
container \"81f9afbf1d1d6872da19ade1289da16ec8af4219b1b9a9da72121bdbf1d9399a\": container with ID starting with 81f9afbf1d1d6872da19ade1289da16ec8af4219b1b9a9da72121bdbf1d9399a not found: ID does not exist" Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.151605 4910 scope.go:117] "RemoveContainer" containerID="fe70523b6d1d4d49a89587a0088790bf419f19aa09ef1acc13dbd12454880644" Nov 25 22:40:31 crc kubenswrapper[4910]: E1125 22:40:31.151911 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe70523b6d1d4d49a89587a0088790bf419f19aa09ef1acc13dbd12454880644\": container with ID starting with fe70523b6d1d4d49a89587a0088790bf419f19aa09ef1acc13dbd12454880644 not found: ID does not exist" containerID="fe70523b6d1d4d49a89587a0088790bf419f19aa09ef1acc13dbd12454880644" Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.151950 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe70523b6d1d4d49a89587a0088790bf419f19aa09ef1acc13dbd12454880644"} err="failed to get container status \"fe70523b6d1d4d49a89587a0088790bf419f19aa09ef1acc13dbd12454880644\": rpc error: code = NotFound desc = could not find container \"fe70523b6d1d4d49a89587a0088790bf419f19aa09ef1acc13dbd12454880644\": container with ID starting with fe70523b6d1d4d49a89587a0088790bf419f19aa09ef1acc13dbd12454880644 not found: ID does not exist" Nov 25 22:40:31 crc kubenswrapper[4910]: I1125 22:40:31.221103 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" path="/var/lib/kubelet/pods/213a6afd-d9b6-4977-b1ab-40ad59f6e4a7/volumes" Nov 25 22:40:39 crc kubenswrapper[4910]: I1125 22:40:39.672487 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/util/0.log" Nov 25 22:40:39 crc kubenswrapper[4910]: I1125 22:40:39.820987 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/util/0.log" Nov 25 22:40:39 crc kubenswrapper[4910]: I1125 22:40:39.849079 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/pull/0.log" Nov 25 22:40:39 crc kubenswrapper[4910]: I1125 22:40:39.912813 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/pull/0.log" Nov 25 22:40:40 crc kubenswrapper[4910]: I1125 22:40:40.090137 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/util/0.log" Nov 25 22:40:40 crc kubenswrapper[4910]: I1125 22:40:40.144139 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/pull/0.log" Nov 25 22:40:40 crc kubenswrapper[4910]: I1125 22:40:40.188541 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_9a961a8416366f70397849f372a183200993c0d77a3733fbcb262311c4v89jp_770f8bcb-c718-44f0-9311-d3f1a782aed2/extract/0.log" Nov 25 22:40:40 crc 
kubenswrapper[4910]: I1125 22:40:40.311504 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-x58hx_22c915a2-80bf-454b-b0e6-7a5bbafec7a5/kube-rbac-proxy/0.log" Nov 25 22:40:40 crc kubenswrapper[4910]: I1125 22:40:40.392161 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-gvsd5_ef4b8019-398c-453d-9b78-71c340bf2bdd/kube-rbac-proxy/0.log" Nov 25 22:40:40 crc kubenswrapper[4910]: I1125 22:40:40.426980 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7d9dfd778-x58hx_22c915a2-80bf-454b-b0e6-7a5bbafec7a5/manager/0.log" Nov 25 22:40:41 crc kubenswrapper[4910]: I1125 22:40:41.199501 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-859b6ccc6-gvsd5_ef4b8019-398c-453d-9b78-71c340bf2bdd/manager/0.log" Nov 25 22:40:41 crc kubenswrapper[4910]: I1125 22:40:41.247281 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-gbgd2_cd608fcb-14bd-424e-9f6e-c0eea37397ea/manager/0.log" Nov 25 22:40:41 crc kubenswrapper[4910]: I1125 22:40:41.285230 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-78b4bc895b-gbgd2_cd608fcb-14bd-424e-9f6e-c0eea37397ea/kube-rbac-proxy/0.log" Nov 25 22:40:41 crc kubenswrapper[4910]: I1125 22:40:41.510130 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-776b995c47-chsbs_bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9/kube-rbac-proxy/0.log" Nov 25 22:40:41 crc kubenswrapper[4910]: I1125 22:40:41.566518 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-776b995c47-chsbs_bd7a08fe-f30e-4a1e-a92d-7c813fd14fa9/manager/0.log" Nov 25 22:40:41 crc kubenswrapper[4910]: I1125 22:40:41.934691 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-vwgmq_28574aa2-4470-4432-b7f0-4b3b52b5f8b9/kube-rbac-proxy/0.log" Nov 25 22:40:41 crc kubenswrapper[4910]: I1125 22:40:41.949128 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5f64f6f8bb-vwgmq_28574aa2-4470-4432-b7f0-4b3b52b5f8b9/manager/0.log" Nov 25 22:40:42 crc kubenswrapper[4910]: I1125 22:40:42.092431 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-pqkz6_6c888013-ea9c-433c-973f-af7c5c22f8c9/kube-rbac-proxy/0.log" Nov 25 22:40:42 crc kubenswrapper[4910]: I1125 22:40:42.159269 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-68c6d99b8f-pqkz6_6c888013-ea9c-433c-973f-af7c5c22f8c9/manager/0.log" Nov 25 22:40:42 crc kubenswrapper[4910]: I1125 22:40:42.464766 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-9mx4m_0345d3a7-45fa-4bce-8dcb-4bef18de4b21/kube-rbac-proxy/0.log" Nov 25 22:40:42 crc kubenswrapper[4910]: I1125 22:40:42.507238 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-57548d458d-9mx4m_0345d3a7-45fa-4bce-8dcb-4bef18de4b21/manager/0.log" 
Nov 25 22:40:42 crc kubenswrapper[4910]: I1125 22:40:42.519398 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-q9hg4_2cce005e-33cd-4b63-8798-b0b7eb53ba73/kube-rbac-proxy/0.log"
Nov 25 22:40:42 crc kubenswrapper[4910]: I1125 22:40:42.723856 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-6c548fd776-q9hg4_2cce005e-33cd-4b63-8798-b0b7eb53ba73/manager/0.log"
Nov 25 22:40:42 crc kubenswrapper[4910]: I1125 22:40:42.763045 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-546d4bdf48-4hf76_c27ac874-a062-4342-9559-a14acbff4c9d/kube-rbac-proxy/0.log"
Nov 25 22:40:42 crc kubenswrapper[4910]: I1125 22:40:42.779481 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-546d4bdf48-4hf76_c27ac874-a062-4342-9559-a14acbff4c9d/manager/0.log"
Nov 25 22:40:42 crc kubenswrapper[4910]: I1125 22:40:42.982896 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-z9dd7_5c6fa310-d85a-4ac3-be15-478635a8c221/kube-rbac-proxy/0.log"
Nov 25 22:40:43 crc kubenswrapper[4910]: I1125 22:40:43.037842 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6546668bfd-z9dd7_5c6fa310-d85a-4ac3-be15-478635a8c221/manager/0.log"
Nov 25 22:40:43 crc kubenswrapper[4910]: I1125 22:40:43.102101 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-tkpxw_44442e02-9c1f-4a6e-bcdd-237b8260638d/kube-rbac-proxy/0.log"
Nov 25 22:40:43 crc kubenswrapper[4910]: I1125 22:40:43.106694 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-56bbcc9d85-tkpxw_44442e02-9c1f-4a6e-bcdd-237b8260638d/manager/0.log"
Nov 25 22:40:43 crc kubenswrapper[4910]: I1125 22:40:43.232753 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-t4qfc_eadae0d9-eee7-42f3-aa0e-c42ef3282f24/kube-rbac-proxy/0.log"
Nov 25 22:40:43 crc kubenswrapper[4910]: I1125 22:40:43.467993 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-555bbdd45-7f94x_ad233905-ebeb-4698-8261-d8a395be75d7/kube-rbac-proxy/0.log"
Nov 25 22:40:43 crc kubenswrapper[4910]: I1125 22:40:43.496689 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5fdfd5b6b5-t4qfc_eadae0d9-eee7-42f3-aa0e-c42ef3282f24/manager/0.log"
Nov 25 22:40:43 crc kubenswrapper[4910]: I1125 22:40:43.636257 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-555bbdd45-7f94x_ad233905-ebeb-4698-8261-d8a395be75d7/manager/0.log"
Nov 25 22:40:43 crc kubenswrapper[4910]: I1125 22:40:43.730429 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-czf4h_2c94f244-a036-47af-8ba4-5dfe41ad5e66/manager/0.log"
Nov 25 22:40:43 crc kubenswrapper[4910]: I1125 22:40:43.766643 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-64cdc6ff96-czf4h_2c94f244-a036-47af-8ba4-5dfe41ad5e66/kube-rbac-proxy/0.log"
Nov 25 22:40:43 crc kubenswrapper[4910]: I1125 22:40:43.822749 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd_65280dcb-6ac6-443b-88f0-7d3b0dadb4f8/kube-rbac-proxy/0.log"
Nov 25 22:40:43 crc kubenswrapper[4910]: I1125 22:40:43.916695 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-f44bdcf57-5n5vd_65280dcb-6ac6-443b-88f0-7d3b0dadb4f8/manager/0.log"
Nov 25 22:40:44 crc kubenswrapper[4910]: I1125 22:40:44.249191 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-5475b86485-8h9j5_207712f3-d06c-435f-9a0d-f6a895ee4578/operator/0.log"
Nov 25 22:40:44 crc kubenswrapper[4910]: I1125 22:40:44.316118 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-7lqkm_a4af1c8b-9a29-47cc-aec2-501fe04e24fd/registry-server/0.log"
Nov 25 22:40:44 crc kubenswrapper[4910]: I1125 22:40:44.446059 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6c4d7c9757-gpg9j_5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c/kube-rbac-proxy/0.log"
Nov 25 22:40:44 crc kubenswrapper[4910]: I1125 22:40:44.657571 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-6c4d7c9757-gpg9j_5c38fd88-4bb1-4d48-a8d4-fe533cbb2d0c/manager/0.log"
Nov 25 22:40:44 crc kubenswrapper[4910]: I1125 22:40:44.699263 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-wsd5q_f3384730-e8d8-4e36-9f3e-8e2dbf3176cb/kube-rbac-proxy/0.log"
Nov 25 22:40:44 crc kubenswrapper[4910]: I1125 22:40:44.780697 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-78f8948974-wsd5q_f3384730-e8d8-4e36-9f3e-8e2dbf3176cb/manager/0.log"
Nov 25 22:40:44 crc kubenswrapper[4910]: I1125 22:40:44.971455 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-dvpfs_838225a6-f682-4181-aeab-073767c8d49a/operator/0.log"
Nov 25 22:40:45 crc kubenswrapper[4910]: I1125 22:40:45.047701 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-wc77r_ba6c852e-59d0-4e5a-8967-3502457d62ec/kube-rbac-proxy/0.log"
Nov 25 22:40:45 crc kubenswrapper[4910]: I1125 22:40:45.104780 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-758b84fd57-x2sxf_9f2ecc40-f2f3-4e9e-b74e-c38ef55879b9/manager/0.log"
Nov 25 22:40:45 crc kubenswrapper[4910]: I1125 22:40:45.429701 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-6zxdg_e117033c-b566-4c46-bd57-9e173e88a224/kube-rbac-proxy/0.log"
Nov 25 22:40:45 crc kubenswrapper[4910]: I1125 22:40:45.499552 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-5f8c65bbfc-wc77r_ba6c852e-59d0-4e5a-8967-3502457d62ec/manager/0.log"
Nov 25 22:40:45 crc kubenswrapper[4910]: I1125 22:40:45.617143 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-76cc84c6bb-6zxdg_e117033c-b566-4c46-bd57-9e173e88a224/manager/0.log"
Nov 25 22:40:45 crc kubenswrapper[4910]: I1125 22:40:45.701411 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-74fcfc6d4b-2jdvk_bb7d559d-1779-400f-b556-1adbb0c61b60/kube-rbac-proxy/0.log"
Nov 25 22:40:45 crc kubenswrapper[4910]: I1125 22:40:45.768679 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-74fcfc6d4b-2jdvk_bb7d559d-1779-400f-b556-1adbb0c61b60/manager/0.log"
Nov 25 22:40:45 crc kubenswrapper[4910]: I1125 22:40:45.861462 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-g9p8x_0ac69701-2b03-4d80-bb8f-8f46acb193e4/kube-rbac-proxy/0.log"
Nov 25 22:40:45 crc kubenswrapper[4910]: I1125 22:40:45.867303 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-656dcb59d4-g9p8x_0ac69701-2b03-4d80-bb8f-8f46acb193e4/manager/0.log"
Nov 25 22:40:53 crc kubenswrapper[4910]: I1125 22:40:53.099315 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 22:40:53 crc kubenswrapper[4910]: I1125 22:40:53.100137 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 22:41:09 crc kubenswrapper[4910]: I1125 22:41:09.831238 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-6ncxd_b8eb6262-2c30-4192-8936-9463698c361e/control-plane-machine-set-operator/0.log"
Nov 25 22:41:10 crc kubenswrapper[4910]: I1125 22:41:10.018287 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-8gf7r_bc0f5871-442b-4fa3-863c-173c2df1ffd4/kube-rbac-proxy/0.log"
Nov 25 22:41:10 crc kubenswrapper[4910]: I1125 22:41:10.081142 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-8gf7r_bc0f5871-442b-4fa3-863c-173c2df1ffd4/machine-api-operator/0.log"
Nov 25 22:41:23 crc kubenswrapper[4910]: I1125 22:41:23.099639 4910 patch_prober.go:28] interesting pod/machine-config-daemon-g8f4t container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 25 22:41:23 crc kubenswrapper[4910]: I1125 22:41:23.100805 4910 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 25 22:41:23 crc kubenswrapper[4910]: I1125 22:41:23.101008 4910 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t"
Nov 25 22:41:23 crc kubenswrapper[4910]: I1125 22:41:23.103968 4910 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"} pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 25 22:41:23 crc kubenswrapper[4910]: I1125 22:41:23.104295 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerName="machine-config-daemon" containerID="cri-o://08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc" gracePeriod=600
Nov 25 22:41:23 crc kubenswrapper[4910]: E1125 22:41:23.249932 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:41:23 crc kubenswrapper[4910]: I1125 22:41:23.594710 4910 generic.go:334] "Generic (PLEG): container finished" podID="89c4a6ab-992c-467f-92fe-1111582e1b49" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc" exitCode=0
Nov 25 22:41:23 crc kubenswrapper[4910]: I1125 22:41:23.594788 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerDied","Data":"08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"}
Nov 25 22:41:23 crc kubenswrapper[4910]: I1125 22:41:23.594905 4910 scope.go:117] "RemoveContainer" containerID="ba9869401faf0daa1e3b20ff8fe3e39564363a9833a4e10f10eec05aeff7e2b2"
Nov 25 22:41:23 crc kubenswrapper[4910]: I1125 22:41:23.595976 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:41:23 crc kubenswrapper[4910]: E1125 22:41:23.596380 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:41:25 crc kubenswrapper[4910]: I1125 22:41:25.412729 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-kl2mf_949d68d4-09e9-4d53-a0d6-0d667e0c7b09/cert-manager-controller/0.log"
Nov 25 22:41:25 crc kubenswrapper[4910]: I1125 22:41:25.690425 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-xtxpk_ce2757a9-3fa1-4cf5-9ace-bc7cc1922640/cert-manager-webhook/0.log"
Nov 25 22:41:25 crc kubenswrapper[4910]: I1125 22:41:25.697692 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-fxdt9_a1017150-9116-4453-84f8-bc8148ee529e/cert-manager-cainjector/0.log"
Nov 25 22:41:38 crc kubenswrapper[4910]: I1125 22:41:38.204762 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:41:38 crc kubenswrapper[4910]: E1125 22:41:38.205715 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:41:42 crc kubenswrapper[4910]: I1125 22:41:42.124394 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-h5pzd_97f18c66-ca4f-40ce-8b4f-b43cd7a99690/nmstate-console-plugin/0.log"
Nov 25 22:41:43 crc kubenswrapper[4910]: I1125 22:41:43.030344 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-2hz8s_27c220ba-4a63-4e7f-85f5-f1aa823b41cc/nmstate-handler/0.log"
Nov 25 22:41:43 crc kubenswrapper[4910]: I1125 22:41:43.086651 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-m5hsc_db10171c-40c8-4bfd-88b8-c1bd80b4e37c/kube-rbac-proxy/0.log"
Nov 25 22:41:43 crc kubenswrapper[4910]: I1125 22:41:43.121318 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-m5hsc_db10171c-40c8-4bfd-88b8-c1bd80b4e37c/nmstate-metrics/0.log"
Nov 25 22:41:43 crc kubenswrapper[4910]: I1125 22:41:43.350742 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-bgkcs_3b2b7b01-5b19-471d-bec2-10f3182a21cd/nmstate-operator/0.log"
Nov 25 22:41:43 crc kubenswrapper[4910]: I1125 22:41:43.378261 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-56dnp_f21c62ce-5e4c-4730-afa0-9d4ef734952f/nmstate-webhook/0.log"
Nov 25 22:41:49 crc kubenswrapper[4910]: I1125 22:41:49.204805 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:41:49 crc kubenswrapper[4910]: E1125 22:41:49.205861 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:42:02 crc kubenswrapper[4910]: I1125 22:42:02.205959 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-57bhp_8517a279-1eca-4be6-a4c0-09716207a094/kube-rbac-proxy/0.log"
Nov 25 22:42:02 crc kubenswrapper[4910]: I1125 22:42:02.394761 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-57bhp_8517a279-1eca-4be6-a4c0-09716207a094/controller/0.log"
Nov 25 22:42:02 crc kubenswrapper[4910]: I1125 22:42:02.497954 4910 log.go:25] "Finished parsing log file"
path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-frr-files/0.log" Nov 25 22:42:02 crc kubenswrapper[4910]: I1125 22:42:02.724496 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-frr-files/0.log" Nov 25 22:42:02 crc kubenswrapper[4910]: I1125 22:42:02.756801 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-reloader/0.log" Nov 25 22:42:02 crc kubenswrapper[4910]: I1125 22:42:02.773554 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-metrics/0.log" Nov 25 22:42:02 crc kubenswrapper[4910]: I1125 22:42:02.777393 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-reloader/0.log" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.014930 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-metrics/0.log" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.041852 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-frr-files/0.log" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.064358 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-reloader/0.log" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.122929 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-metrics/0.log" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.214092 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc" Nov 25 22:42:03 crc kubenswrapper[4910]: E1125 22:42:03.214537 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.308226 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-reloader/0.log" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.309929 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-frr-files/0.log" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.360388 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/controller/0.log" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.369086 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/cp-metrics/0.log" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.521793 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/frr-metrics/0.log" Nov 25 22:42:03 crc 
kubenswrapper[4910]: I1125 22:42:03.567006 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/kube-rbac-proxy/0.log" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.679114 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/kube-rbac-proxy-frr/0.log" Nov 25 22:42:03 crc kubenswrapper[4910]: I1125 22:42:03.784886 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/reloader/0.log" Nov 25 22:42:04 crc kubenswrapper[4910]: I1125 22:42:04.026464 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-csqvk_5f6618c7-ba0f-45ce-a1f1-d42f55e72500/frr-k8s-webhook-server/0.log" Nov 25 22:42:04 crc kubenswrapper[4910]: I1125 22:42:04.147026 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-9c5c567bb-p4k28_6299e276-3b3c-4c65-abab-321a1129c175/manager/0.log" Nov 25 22:42:04 crc kubenswrapper[4910]: I1125 22:42:04.301140 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-747dcffbf8-gz64s_7cd2774b-6d1f-4fc6-811e-a13f715832ab/webhook-server/0.log" Nov 25 22:42:04 crc kubenswrapper[4910]: I1125 22:42:04.562410 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fmrxg_6dcf2928-8050-4c63-9035-35b85bb922ce/kube-rbac-proxy/0.log" Nov 25 22:42:05 crc kubenswrapper[4910]: I1125 22:42:05.069465 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-fmrxg_6dcf2928-8050-4c63-9035-35b85bb922ce/speaker/0.log" Nov 25 22:42:05 crc kubenswrapper[4910]: I1125 22:42:05.155888 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-qw4hv_9d4be312-9921-4b2e-8456-acd2b0f012de/frr/0.log" Nov 25 22:42:14 crc kubenswrapper[4910]: I1125 22:42:14.204326 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc" Nov 25 22:42:14 crc kubenswrapper[4910]: E1125 22:42:14.205187 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:42:21 crc kubenswrapper[4910]: I1125 22:42:21.716804 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/util/0.log" Nov 25 22:42:21 crc kubenswrapper[4910]: I1125 22:42:21.837946 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/pull/0.log" Nov 25 22:42:21 crc kubenswrapper[4910]: I1125 22:42:21.861606 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/util/0.log" Nov 25 22:42:21 crc kubenswrapper[4910]: I1125 22:42:21.914388 4910 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/pull/0.log" Nov 25 22:42:22 crc kubenswrapper[4910]: I1125 22:42:22.085187 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/util/0.log" Nov 25 22:42:22 crc kubenswrapper[4910]: I1125 22:42:22.100147 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/pull/0.log" Nov 25 22:42:22 crc kubenswrapper[4910]: I1125 22:42:22.106759 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772enf8dc_3c3663ce-55da-4575-b39f-43df7bf5e729/extract/0.log" Nov 25 22:42:22 crc kubenswrapper[4910]: I1125 22:42:22.276748 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-utilities/0.log" Nov 25 22:42:22 crc kubenswrapper[4910]: I1125 22:42:22.480438 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-utilities/0.log" Nov 25 22:42:22 crc kubenswrapper[4910]: I1125 22:42:22.482468 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-content/0.log" Nov 25 22:42:22 crc kubenswrapper[4910]: I1125 22:42:22.552428 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-content/0.log" Nov 25 22:42:22 crc kubenswrapper[4910]: I1125 22:42:22.639518 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-utilities/0.log" Nov 25 22:42:22 crc kubenswrapper[4910]: I1125 22:42:22.698709 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/extract-content/0.log" Nov 25 22:42:22 crc kubenswrapper[4910]: I1125 22:42:22.889077 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-utilities/0.log" Nov 25 22:42:23 crc kubenswrapper[4910]: I1125 22:42:23.318279 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-776qn_7d6e52b7-c568-4b50-9af0-70a8ce753479/registry-server/0.log" Nov 25 22:42:23 crc kubenswrapper[4910]: I1125 22:42:23.774910 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-content/0.log" Nov 25 22:42:23 crc kubenswrapper[4910]: I1125 22:42:23.787378 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-utilities/0.log" Nov 25 22:42:23 crc kubenswrapper[4910]: I1125 22:42:23.796871 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-content/0.log" Nov 
25 22:42:24 crc kubenswrapper[4910]: I1125 22:42:24.000971 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-utilities/0.log" Nov 25 22:42:24 crc kubenswrapper[4910]: I1125 22:42:24.020749 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/extract-content/0.log" Nov 25 22:42:24 crc kubenswrapper[4910]: I1125 22:42:24.232389 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/util/0.log" Nov 25 22:42:24 crc kubenswrapper[4910]: I1125 22:42:24.534631 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/util/0.log" Nov 25 22:42:24 crc kubenswrapper[4910]: I1125 22:42:24.562324 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/pull/0.log" Nov 25 22:42:24 crc kubenswrapper[4910]: I1125 22:42:24.626982 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/pull/0.log" Nov 25 22:42:24 crc kubenswrapper[4910]: I1125 22:42:24.688479 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-whlck_2a179743-09db-4281-b030-23d453ecc1d6/registry-server/0.log" Nov 25 22:42:24 crc kubenswrapper[4910]: I1125 22:42:24.822311 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/pull/0.log" Nov 25 22:42:24 crc kubenswrapper[4910]: I1125 22:42:24.835998 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/util/0.log" Nov 25 22:42:24 crc kubenswrapper[4910]: I1125 22:42:24.848005 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6tqw7l_52380560-51e8-43b1-9b6e-8036f43b20c3/extract/0.log" Nov 25 22:42:24 crc kubenswrapper[4910]: I1125 22:42:24.939572 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-smb6m_e2af0945-04e5-4220-981f-d7a4892fcf69/marketplace-operator/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 22:42:25.002857 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-utilities/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 22:42:25.228574 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-content/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 22:42:25.271886 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-utilities/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 
22:42:25.282545 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-content/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 22:42:25.442263 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-utilities/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 22:42:25.462356 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/extract-content/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 22:42:25.502666 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-utilities/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 22:42:25.674672 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-drk5r_ada3f847-455b-464f-9d23-7052e0d91f2b/registry-server/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 22:42:25.743600 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-utilities/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 22:42:25.751516 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-content/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 22:42:25.781290 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-content/0.log" Nov 25 22:42:25 crc kubenswrapper[4910]: I1125 22:42:25.997836 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-content/0.log" Nov 25 22:42:26 crc kubenswrapper[4910]: I1125 22:42:26.055781 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/extract-utilities/0.log" Nov 25 22:42:26 crc kubenswrapper[4910]: I1125 22:42:26.175447 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-94j8s_c4ff820e-8002-4ef2-aec8-296b6ec41494/registry-server/0.log" Nov 25 22:42:28 crc kubenswrapper[4910]: I1125 22:42:28.204151 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc" Nov 25 22:42:28 crc kubenswrapper[4910]: E1125 22:42:28.205071 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:42:39 crc kubenswrapper[4910]: I1125 22:42:39.205286 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc" Nov 25 22:42:39 crc kubenswrapper[4910]: E1125 22:42:39.206692 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.401109 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-trdmc"] Nov 25 22:42:51 crc kubenswrapper[4910]: E1125 22:42:51.403498 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" containerName="registry-server" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.403583 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" containerName="registry-server" Nov 25 22:42:51 crc kubenswrapper[4910]: E1125 22:42:51.403657 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" containerName="extract-content" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.403708 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" containerName="extract-content" Nov 25 22:42:51 crc kubenswrapper[4910]: E1125 22:42:51.403764 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" containerName="extract-utilities" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.403815 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" containerName="extract-utilities" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.404106 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="213a6afd-d9b6-4977-b1ab-40ad59f6e4a7" containerName="registry-server" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.406590 4910 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.417576 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-trdmc"] Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.517797 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-catalog-content\") pod \"certified-operators-trdmc\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.517942 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-utilities\") pod \"certified-operators-trdmc\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.518027 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlc6j\" (UniqueName: \"kubernetes.io/projected/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-kube-api-access-jlc6j\") pod \"certified-operators-trdmc\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.619912 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-catalog-content\") pod \"certified-operators-trdmc\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.619979 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-utilities\") pod \"certified-operators-trdmc\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.620526 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-catalog-content\") pod \"certified-operators-trdmc\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.620605 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-utilities\") pod \"certified-operators-trdmc\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.620646 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlc6j\" (UniqueName: \"kubernetes.io/projected/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-kube-api-access-jlc6j\") pod \"certified-operators-trdmc\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.653194 4910 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-jlc6j\" (UniqueName: \"kubernetes.io/projected/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-kube-api-access-jlc6j\") pod \"certified-operators-trdmc\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:42:51 crc kubenswrapper[4910]: I1125 22:42:51.740802 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.406007 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-trdmc"] Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.422014 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-hwlcb"] Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.424366 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.438767 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwlcb"] Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.541990 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bdsm\" (UniqueName: \"kubernetes.io/projected/5e8273d4-adfb-4759-8332-3e804e2f526c-kube-api-access-8bdsm\") pod \"redhat-marketplace-hwlcb\" (UID: \"5e8273d4-adfb-4759-8332-3e804e2f526c\") " pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.542174 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-catalog-content\") pod \"redhat-marketplace-hwlcb\" (UID: \"5e8273d4-adfb-4759-8332-3e804e2f526c\") " pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.542279 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-utilities\") pod \"redhat-marketplace-hwlcb\" (UID: \"5e8273d4-adfb-4759-8332-3e804e2f526c\") " pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.646712 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bdsm\" (UniqueName: \"kubernetes.io/projected/5e8273d4-adfb-4759-8332-3e804e2f526c-kube-api-access-8bdsm\") pod \"redhat-marketplace-hwlcb\" (UID: \"5e8273d4-adfb-4759-8332-3e804e2f526c\") " pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.646850 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-catalog-content\") pod \"redhat-marketplace-hwlcb\" (UID: \"5e8273d4-adfb-4759-8332-3e804e2f526c\") " pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.646918 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-utilities\") pod \"redhat-marketplace-hwlcb\" (UID: 
\"5e8273d4-adfb-4759-8332-3e804e2f526c\") " pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.645232 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-trdmc" event={"ID":"77a96243-e1e2-4a0e-a695-e5dece3a6f4b","Type":"ContainerStarted","Data":"d85d6c385f0bb0bf13d57fad610bccd8e7d9ce13b36ccb8097eaf5f1427e9594"} Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.647528 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-utilities\") pod \"redhat-marketplace-hwlcb\" (UID: \"5e8273d4-adfb-4759-8332-3e804e2f526c\") " pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.650683 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-catalog-content\") pod \"redhat-marketplace-hwlcb\" (UID: \"5e8273d4-adfb-4759-8332-3e804e2f526c\") " pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.694298 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bdsm\" (UniqueName: \"kubernetes.io/projected/5e8273d4-adfb-4759-8332-3e804e2f526c-kube-api-access-8bdsm\") pod \"redhat-marketplace-hwlcb\" (UID: \"5e8273d4-adfb-4759-8332-3e804e2f526c\") " pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:42:52 crc kubenswrapper[4910]: I1125 22:42:52.788102 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:42:53 crc kubenswrapper[4910]: I1125 22:42:53.346857 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwlcb"] Nov 25 22:42:53 crc kubenswrapper[4910]: W1125 22:42:53.377515 4910 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e8273d4_adfb_4759_8332_3e804e2f526c.slice/crio-b39b21d5f537d3c6f0a35f17b675b621a24965ff35b3ebc839904906781d12bb WatchSource:0}: Error finding container b39b21d5f537d3c6f0a35f17b675b621a24965ff35b3ebc839904906781d12bb: Status 404 returned error can't find the container with id b39b21d5f537d3c6f0a35f17b675b621a24965ff35b3ebc839904906781d12bb Nov 25 22:42:53 crc kubenswrapper[4910]: I1125 22:42:53.663485 4910 generic.go:334] "Generic (PLEG): container finished" podID="5e8273d4-adfb-4759-8332-3e804e2f526c" containerID="16fcdd7d1845159b16cb94076f7b6ac5a737aa57b3d2a9497baf2b81a2020003" exitCode=0 Nov 25 22:42:53 crc kubenswrapper[4910]: I1125 22:42:53.663731 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwlcb" event={"ID":"5e8273d4-adfb-4759-8332-3e804e2f526c","Type":"ContainerDied","Data":"16fcdd7d1845159b16cb94076f7b6ac5a737aa57b3d2a9497baf2b81a2020003"} Nov 25 22:42:53 crc kubenswrapper[4910]: I1125 22:42:53.663789 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwlcb" event={"ID":"5e8273d4-adfb-4759-8332-3e804e2f526c","Type":"ContainerStarted","Data":"b39b21d5f537d3c6f0a35f17b675b621a24965ff35b3ebc839904906781d12bb"} Nov 25 22:42:53 crc kubenswrapper[4910]: I1125 22:42:53.671961 4910 generic.go:334] "Generic (PLEG): container finished" podID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" 
containerID="ea166ab93a6ee34012e3c68b3482422ee492fbe4d1f5efc202bec85a34f5f11f" exitCode=0 Nov 25 22:42:53 crc kubenswrapper[4910]: I1125 22:42:53.672034 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-trdmc" event={"ID":"77a96243-e1e2-4a0e-a695-e5dece3a6f4b","Type":"ContainerDied","Data":"ea166ab93a6ee34012e3c68b3482422ee492fbe4d1f5efc202bec85a34f5f11f"} Nov 25 22:42:54 crc kubenswrapper[4910]: I1125 22:42:54.205163 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc" Nov 25 22:42:54 crc kubenswrapper[4910]: E1125 22:42:54.206173 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49" Nov 25 22:42:54 crc kubenswrapper[4910]: E1125 22:42:54.467276 4910 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.142:35692->38.102.83.142:46763: write tcp 38.102.83.142:35692->38.102.83.142:46763: write: broken pipe Nov 25 22:42:54 crc kubenswrapper[4910]: I1125 22:42:54.695888 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwlcb" event={"ID":"5e8273d4-adfb-4759-8332-3e804e2f526c","Type":"ContainerStarted","Data":"37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1"} Nov 25 22:42:55 crc kubenswrapper[4910]: I1125 22:42:55.708977 4910 generic.go:334] "Generic (PLEG): container finished" podID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" containerID="389cfe0dc52acbc6d34a6ba1facc352b1f77f7613fcf0657b5ec27b8c100eca9" exitCode=0 Nov 25 22:42:55 crc kubenswrapper[4910]: I1125 22:42:55.709089 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-trdmc" event={"ID":"77a96243-e1e2-4a0e-a695-e5dece3a6f4b","Type":"ContainerDied","Data":"389cfe0dc52acbc6d34a6ba1facc352b1f77f7613fcf0657b5ec27b8c100eca9"} Nov 25 22:42:55 crc kubenswrapper[4910]: I1125 22:42:55.712997 4910 generic.go:334] "Generic (PLEG): container finished" podID="5e8273d4-adfb-4759-8332-3e804e2f526c" containerID="37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1" exitCode=0 Nov 25 22:42:55 crc kubenswrapper[4910]: I1125 22:42:55.713043 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwlcb" event={"ID":"5e8273d4-adfb-4759-8332-3e804e2f526c","Type":"ContainerDied","Data":"37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1"} Nov 25 22:42:56 crc kubenswrapper[4910]: I1125 22:42:56.727806 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-trdmc" event={"ID":"77a96243-e1e2-4a0e-a695-e5dece3a6f4b","Type":"ContainerStarted","Data":"820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450"} Nov 25 22:42:56 crc kubenswrapper[4910]: I1125 22:42:56.731670 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwlcb" event={"ID":"5e8273d4-adfb-4759-8332-3e804e2f526c","Type":"ContainerStarted","Data":"ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb"} Nov 25 22:42:56 crc kubenswrapper[4910]: I1125 22:42:56.765385 4910 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-trdmc" podStartSLOduration=3.295466885 podStartE2EDuration="5.76535787s" podCreationTimestamp="2025-11-25 22:42:51 +0000 UTC" firstStartedPulling="2025-11-25 22:42:53.673753728 +0000 UTC m=+4329.136230050" lastFinishedPulling="2025-11-25 22:42:56.143644703 +0000 UTC m=+4331.606121035" observedRunningTime="2025-11-25 22:42:56.761179119 +0000 UTC m=+4332.223655441" watchObservedRunningTime="2025-11-25 22:42:56.76535787 +0000 UTC m=+4332.227834192" Nov 25 22:42:56 crc kubenswrapper[4910]: I1125 22:42:56.787167 4910 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-hwlcb" podStartSLOduration=2.348777255 podStartE2EDuration="4.787149561s" podCreationTimestamp="2025-11-25 22:42:52 +0000 UTC" firstStartedPulling="2025-11-25 22:42:53.667548312 +0000 UTC m=+4329.130024624" lastFinishedPulling="2025-11-25 22:42:56.105920608 +0000 UTC m=+4331.568396930" observedRunningTime="2025-11-25 22:42:56.786026571 +0000 UTC m=+4332.248502893" watchObservedRunningTime="2025-11-25 22:42:56.787149561 +0000 UTC m=+4332.249625883" Nov 25 22:43:01 crc kubenswrapper[4910]: I1125 22:43:01.742267 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:43:01 crc kubenswrapper[4910]: I1125 22:43:01.743302 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:43:01 crc kubenswrapper[4910]: I1125 22:43:01.806785 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:43:01 crc kubenswrapper[4910]: I1125 22:43:01.882524 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:43:02 crc kubenswrapper[4910]: I1125 22:43:02.181888 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-trdmc"] Nov 25 22:43:02 crc kubenswrapper[4910]: I1125 22:43:02.791762 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:43:02 crc kubenswrapper[4910]: I1125 22:43:02.792294 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:43:03 crc kubenswrapper[4910]: I1125 22:43:03.220823 4910 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:43:03 crc kubenswrapper[4910]: I1125 22:43:03.823227 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-trdmc" podUID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" containerName="registry-server" containerID="cri-o://820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450" gracePeriod=2 Nov 25 22:43:03 crc kubenswrapper[4910]: I1125 22:43:03.911324 4910 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-hwlcb" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.350045 4910 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.419755 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-utilities\") pod \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.419873 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-catalog-content\") pod \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.419912 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlc6j\" (UniqueName: \"kubernetes.io/projected/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-kube-api-access-jlc6j\") pod \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\" (UID: \"77a96243-e1e2-4a0e-a695-e5dece3a6f4b\") " Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.422763 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-utilities" (OuterVolumeSpecName: "utilities") pod "77a96243-e1e2-4a0e-a695-e5dece3a6f4b" (UID: "77a96243-e1e2-4a0e-a695-e5dece3a6f4b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.433561 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-kube-api-access-jlc6j" (OuterVolumeSpecName: "kube-api-access-jlc6j") pod "77a96243-e1e2-4a0e-a695-e5dece3a6f4b" (UID: "77a96243-e1e2-4a0e-a695-e5dece3a6f4b"). InnerVolumeSpecName "kube-api-access-jlc6j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.481737 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "77a96243-e1e2-4a0e-a695-e5dece3a6f4b" (UID: "77a96243-e1e2-4a0e-a695-e5dece3a6f4b"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.523341 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-utilities\") on node \"crc\" DevicePath \"\"" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.523385 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.523399 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlc6j\" (UniqueName: \"kubernetes.io/projected/77a96243-e1e2-4a0e-a695-e5dece3a6f4b-kube-api-access-jlc6j\") on node \"crc\" DevicePath \"\"" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.588651 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwlcb"] Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.840027 4910 generic.go:334] "Generic (PLEG): container finished" podID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" containerID="820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450" exitCode=0 Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.840128 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-trdmc" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.840144 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-trdmc" event={"ID":"77a96243-e1e2-4a0e-a695-e5dece3a6f4b","Type":"ContainerDied","Data":"820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450"} Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.840227 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-trdmc" event={"ID":"77a96243-e1e2-4a0e-a695-e5dece3a6f4b","Type":"ContainerDied","Data":"d85d6c385f0bb0bf13d57fad610bccd8e7d9ce13b36ccb8097eaf5f1427e9594"} Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.840300 4910 scope.go:117] "RemoveContainer" containerID="820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.884868 4910 scope.go:117] "RemoveContainer" containerID="389cfe0dc52acbc6d34a6ba1facc352b1f77f7613fcf0657b5ec27b8c100eca9" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.904190 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-trdmc"] Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.917639 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-trdmc"] Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.923750 4910 scope.go:117] "RemoveContainer" containerID="ea166ab93a6ee34012e3c68b3482422ee492fbe4d1f5efc202bec85a34f5f11f" Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.969801 4910 scope.go:117] "RemoveContainer" containerID="820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450" Nov 25 22:43:04 crc kubenswrapper[4910]: E1125 22:43:04.970673 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450\": container with ID starting with 820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450 
not found: ID does not exist" containerID="820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450"
Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.970750 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450"} err="failed to get container status \"820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450\": rpc error: code = NotFound desc = could not find container \"820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450\": container with ID starting with 820a603a380014c1fd6345709ebdb49972563cb1d121f3d87c864a556530f450 not found: ID does not exist"
Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.970796 4910 scope.go:117] "RemoveContainer" containerID="389cfe0dc52acbc6d34a6ba1facc352b1f77f7613fcf0657b5ec27b8c100eca9"
Nov 25 22:43:04 crc kubenswrapper[4910]: E1125 22:43:04.971265 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"389cfe0dc52acbc6d34a6ba1facc352b1f77f7613fcf0657b5ec27b8c100eca9\": container with ID starting with 389cfe0dc52acbc6d34a6ba1facc352b1f77f7613fcf0657b5ec27b8c100eca9 not found: ID does not exist" containerID="389cfe0dc52acbc6d34a6ba1facc352b1f77f7613fcf0657b5ec27b8c100eca9"
Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.971301 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"389cfe0dc52acbc6d34a6ba1facc352b1f77f7613fcf0657b5ec27b8c100eca9"} err="failed to get container status \"389cfe0dc52acbc6d34a6ba1facc352b1f77f7613fcf0657b5ec27b8c100eca9\": rpc error: code = NotFound desc = could not find container \"389cfe0dc52acbc6d34a6ba1facc352b1f77f7613fcf0657b5ec27b8c100eca9\": container with ID starting with 389cfe0dc52acbc6d34a6ba1facc352b1f77f7613fcf0657b5ec27b8c100eca9 not found: ID does not exist"
Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.971321 4910 scope.go:117] "RemoveContainer" containerID="ea166ab93a6ee34012e3c68b3482422ee492fbe4d1f5efc202bec85a34f5f11f"
Nov 25 22:43:04 crc kubenswrapper[4910]: E1125 22:43:04.974643 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea166ab93a6ee34012e3c68b3482422ee492fbe4d1f5efc202bec85a34f5f11f\": container with ID starting with ea166ab93a6ee34012e3c68b3482422ee492fbe4d1f5efc202bec85a34f5f11f not found: ID does not exist" containerID="ea166ab93a6ee34012e3c68b3482422ee492fbe4d1f5efc202bec85a34f5f11f"
Nov 25 22:43:04 crc kubenswrapper[4910]: I1125 22:43:04.974730 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea166ab93a6ee34012e3c68b3482422ee492fbe4d1f5efc202bec85a34f5f11f"} err="failed to get container status \"ea166ab93a6ee34012e3c68b3482422ee492fbe4d1f5efc202bec85a34f5f11f\": rpc error: code = NotFound desc = could not find container \"ea166ab93a6ee34012e3c68b3482422ee492fbe4d1f5efc202bec85a34f5f11f\": container with ID starting with ea166ab93a6ee34012e3c68b3482422ee492fbe4d1f5efc202bec85a34f5f11f not found: ID does not exist"
Nov 25 22:43:05 crc kubenswrapper[4910]: I1125 22:43:05.219732 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:43:05 crc kubenswrapper[4910]: E1125 22:43:05.220138 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:43:05 crc kubenswrapper[4910]: I1125 22:43:05.229968 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" path="/var/lib/kubelet/pods/77a96243-e1e2-4a0e-a695-e5dece3a6f4b/volumes"
Nov 25 22:43:05 crc kubenswrapper[4910]: I1125 22:43:05.859169 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-hwlcb" podUID="5e8273d4-adfb-4759-8332-3e804e2f526c" containerName="registry-server" containerID="cri-o://ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb" gracePeriod=2
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.441044 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwlcb"
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.597138 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-catalog-content\") pod \"5e8273d4-adfb-4759-8332-3e804e2f526c\" (UID: \"5e8273d4-adfb-4759-8332-3e804e2f526c\") "
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.597369 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8bdsm\" (UniqueName: \"kubernetes.io/projected/5e8273d4-adfb-4759-8332-3e804e2f526c-kube-api-access-8bdsm\") pod \"5e8273d4-adfb-4759-8332-3e804e2f526c\" (UID: \"5e8273d4-adfb-4759-8332-3e804e2f526c\") "
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.597609 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-utilities\") pod \"5e8273d4-adfb-4759-8332-3e804e2f526c\" (UID: \"5e8273d4-adfb-4759-8332-3e804e2f526c\") "
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.599064 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-utilities" (OuterVolumeSpecName: "utilities") pod "5e8273d4-adfb-4759-8332-3e804e2f526c" (UID: "5e8273d4-adfb-4759-8332-3e804e2f526c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.617340 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5e8273d4-adfb-4759-8332-3e804e2f526c-kube-api-access-8bdsm" (OuterVolumeSpecName: "kube-api-access-8bdsm") pod "5e8273d4-adfb-4759-8332-3e804e2f526c" (UID: "5e8273d4-adfb-4759-8332-3e804e2f526c"). InnerVolumeSpecName "kube-api-access-8bdsm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.622983 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5e8273d4-adfb-4759-8332-3e804e2f526c" (UID: "5e8273d4-adfb-4759-8332-3e804e2f526c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.700674 4910 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.700717 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8bdsm\" (UniqueName: \"kubernetes.io/projected/5e8273d4-adfb-4759-8332-3e804e2f526c-kube-api-access-8bdsm\") on node \"crc\" DevicePath \"\""
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.700731 4910 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5e8273d4-adfb-4759-8332-3e804e2f526c-utilities\") on node \"crc\" DevicePath \"\""
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.880834 4910 generic.go:334] "Generic (PLEG): container finished" podID="5e8273d4-adfb-4759-8332-3e804e2f526c" containerID="ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb" exitCode=0
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.880907 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwlcb" event={"ID":"5e8273d4-adfb-4759-8332-3e804e2f526c","Type":"ContainerDied","Data":"ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb"}
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.880962 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-hwlcb" event={"ID":"5e8273d4-adfb-4759-8332-3e804e2f526c","Type":"ContainerDied","Data":"b39b21d5f537d3c6f0a35f17b675b621a24965ff35b3ebc839904906781d12bb"}
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.881008 4910 scope.go:117] "RemoveContainer" containerID="ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb"
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.881291 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-hwlcb"
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.942337 4910 scope.go:117] "RemoveContainer" containerID="37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1"
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.950598 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwlcb"]
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.976187 4910 scope.go:117] "RemoveContainer" containerID="16fcdd7d1845159b16cb94076f7b6ac5a737aa57b3d2a9497baf2b81a2020003"
Nov 25 22:43:06 crc kubenswrapper[4910]: I1125 22:43:06.984887 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-hwlcb"]
Nov 25 22:43:07 crc kubenswrapper[4910]: I1125 22:43:07.029240 4910 scope.go:117] "RemoveContainer" containerID="ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb"
Nov 25 22:43:07 crc kubenswrapper[4910]: E1125 22:43:07.034347 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb\": container with ID starting with ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb not found: ID does not exist" containerID="ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb"
Nov 25 22:43:07 crc kubenswrapper[4910]: I1125 22:43:07.034409 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb"} err="failed to get container status \"ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb\": rpc error: code = NotFound desc = could not find container \"ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb\": container with ID starting with ef208e78adb040160b8ef534d625b08d6603e84811c2ca89194d8cb350aaacfb not found: ID does not exist"
Nov 25 22:43:07 crc kubenswrapper[4910]: I1125 22:43:07.034452 4910 scope.go:117] "RemoveContainer" containerID="37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1"
Nov 25 22:43:07 crc kubenswrapper[4910]: E1125 22:43:07.034835 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1\": container with ID starting with 37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1 not found: ID does not exist" containerID="37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1"
Nov 25 22:43:07 crc kubenswrapper[4910]: I1125 22:43:07.034880 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1"} err="failed to get container status \"37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1\": rpc error: code = NotFound desc = could not find container \"37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1\": container with ID starting with 37c40e65e3fdee4b309c3bd28cfbffe5c993fe9fac10e99721eb0bc1e37045d1 not found: ID does not exist"
Nov 25 22:43:07 crc kubenswrapper[4910]: I1125 22:43:07.034909 4910 scope.go:117] "RemoveContainer" containerID="16fcdd7d1845159b16cb94076f7b6ac5a737aa57b3d2a9497baf2b81a2020003"
Nov 25 22:43:07 crc kubenswrapper[4910]: E1125 22:43:07.035212 4910 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16fcdd7d1845159b16cb94076f7b6ac5a737aa57b3d2a9497baf2b81a2020003\": container with ID starting with 16fcdd7d1845159b16cb94076f7b6ac5a737aa57b3d2a9497baf2b81a2020003 not found: ID does not exist" containerID="16fcdd7d1845159b16cb94076f7b6ac5a737aa57b3d2a9497baf2b81a2020003"
Nov 25 22:43:07 crc kubenswrapper[4910]: I1125 22:43:07.035282 4910 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16fcdd7d1845159b16cb94076f7b6ac5a737aa57b3d2a9497baf2b81a2020003"} err="failed to get container status \"16fcdd7d1845159b16cb94076f7b6ac5a737aa57b3d2a9497baf2b81a2020003\": rpc error: code = NotFound desc = could not find container \"16fcdd7d1845159b16cb94076f7b6ac5a737aa57b3d2a9497baf2b81a2020003\": container with ID starting with 16fcdd7d1845159b16cb94076f7b6ac5a737aa57b3d2a9497baf2b81a2020003 not found: ID does not exist"
Nov 25 22:43:07 crc kubenswrapper[4910]: I1125 22:43:07.227976 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5e8273d4-adfb-4759-8332-3e804e2f526c" path="/var/lib/kubelet/pods/5e8273d4-adfb-4759-8332-3e804e2f526c/volumes"
Nov 25 22:43:20 crc kubenswrapper[4910]: I1125 22:43:20.204130 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:43:20 crc kubenswrapper[4910]: E1125 22:43:20.205376 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:43:35 crc kubenswrapper[4910]: I1125 22:43:35.219439 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:43:35 crc kubenswrapper[4910]: E1125 22:43:35.221362 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:43:47 crc kubenswrapper[4910]: I1125 22:43:47.204653 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:43:47 crc kubenswrapper[4910]: E1125 22:43:47.205730 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:43:58 crc kubenswrapper[4910]: I1125 22:43:58.205598 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:43:58 crc kubenswrapper[4910]: E1125 22:43:58.207024 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:44:11 crc kubenswrapper[4910]: I1125 22:44:11.205471 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:44:11 crc kubenswrapper[4910]: E1125 22:44:11.207078 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:44:17 crc kubenswrapper[4910]: I1125 22:44:17.853502 4910 generic.go:334] "Generic (PLEG): container finished" podID="a9e9b562-e71e-40a8-8cfc-7f9564f635db" containerID="fe6b8e2cfb5993a4eca343957b8154e77df17a01cb6a0d32a7df22810b2bb53a" exitCode=0
Nov 25 22:44:17 crc kubenswrapper[4910]: I1125 22:44:17.853624 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-4srbz/must-gather-5zv5t" event={"ID":"a9e9b562-e71e-40a8-8cfc-7f9564f635db","Type":"ContainerDied","Data":"fe6b8e2cfb5993a4eca343957b8154e77df17a01cb6a0d32a7df22810b2bb53a"}
Nov 25 22:44:17 crc kubenswrapper[4910]: I1125 22:44:17.856239 4910 scope.go:117] "RemoveContainer" containerID="fe6b8e2cfb5993a4eca343957b8154e77df17a01cb6a0d32a7df22810b2bb53a"
Nov 25 22:44:18 crc kubenswrapper[4910]: I1125 22:44:18.426746 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4srbz_must-gather-5zv5t_a9e9b562-e71e-40a8-8cfc-7f9564f635db/gather/0.log"
Nov 25 22:44:23 crc kubenswrapper[4910]: I1125 22:44:23.206258 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:44:23 crc kubenswrapper[4910]: E1125 22:44:23.207458 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:44:29 crc kubenswrapper[4910]: I1125 22:44:29.792117 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-4srbz/must-gather-5zv5t"]
Nov 25 22:44:29 crc kubenswrapper[4910]: I1125 22:44:29.802724 4910 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-4srbz/must-gather-5zv5t" podUID="a9e9b562-e71e-40a8-8cfc-7f9564f635db" containerName="copy" containerID="cri-o://365d183e3c7f847fe0399b0fcaf3687761b1da201282feb3e6a3feba7e85a2bb" gracePeriod=2
Nov 25 22:44:29 crc kubenswrapper[4910]: I1125 22:44:29.827414 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-4srbz/must-gather-5zv5t"]
Nov 25 22:44:30 crc kubenswrapper[4910]: I1125 22:44:30.013143 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4srbz_must-gather-5zv5t_a9e9b562-e71e-40a8-8cfc-7f9564f635db/copy/0.log"
Nov 25 22:44:30 crc kubenswrapper[4910]: I1125 22:44:30.013825 4910 generic.go:334] "Generic (PLEG): container finished" podID="a9e9b562-e71e-40a8-8cfc-7f9564f635db" containerID="365d183e3c7f847fe0399b0fcaf3687761b1da201282feb3e6a3feba7e85a2bb" exitCode=143
Nov 25 22:44:30 crc kubenswrapper[4910]: E1125 22:44:30.060742 4910 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9e9b562_e71e_40a8_8cfc_7f9564f635db.slice/crio-365d183e3c7f847fe0399b0fcaf3687761b1da201282feb3e6a3feba7e85a2bb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9e9b562_e71e_40a8_8cfc_7f9564f635db.slice/crio-conmon-365d183e3c7f847fe0399b0fcaf3687761b1da201282feb3e6a3feba7e85a2bb.scope\": RecentStats: unable to find data in memory cache]"
Nov 25 22:44:30 crc kubenswrapper[4910]: I1125 22:44:30.268051 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4srbz_must-gather-5zv5t_a9e9b562-e71e-40a8-8cfc-7f9564f635db/copy/0.log"
Nov 25 22:44:30 crc kubenswrapper[4910]: I1125 22:44:30.269542 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4srbz/must-gather-5zv5t"
Nov 25 22:44:30 crc kubenswrapper[4910]: I1125 22:44:30.315971 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pb4j4\" (UniqueName: \"kubernetes.io/projected/a9e9b562-e71e-40a8-8cfc-7f9564f635db-kube-api-access-pb4j4\") pod \"a9e9b562-e71e-40a8-8cfc-7f9564f635db\" (UID: \"a9e9b562-e71e-40a8-8cfc-7f9564f635db\") "
Nov 25 22:44:30 crc kubenswrapper[4910]: I1125 22:44:30.316504 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a9e9b562-e71e-40a8-8cfc-7f9564f635db-must-gather-output\") pod \"a9e9b562-e71e-40a8-8cfc-7f9564f635db\" (UID: \"a9e9b562-e71e-40a8-8cfc-7f9564f635db\") "
Nov 25 22:44:30 crc kubenswrapper[4910]: I1125 22:44:30.326417 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9e9b562-e71e-40a8-8cfc-7f9564f635db-kube-api-access-pb4j4" (OuterVolumeSpecName: "kube-api-access-pb4j4") pod "a9e9b562-e71e-40a8-8cfc-7f9564f635db" (UID: "a9e9b562-e71e-40a8-8cfc-7f9564f635db"). InnerVolumeSpecName "kube-api-access-pb4j4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 22:44:30 crc kubenswrapper[4910]: I1125 22:44:30.419954 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pb4j4\" (UniqueName: \"kubernetes.io/projected/a9e9b562-e71e-40a8-8cfc-7f9564f635db-kube-api-access-pb4j4\") on node \"crc\" DevicePath \"\""
Nov 25 22:44:30 crc kubenswrapper[4910]: I1125 22:44:30.476102 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9e9b562-e71e-40a8-8cfc-7f9564f635db-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "a9e9b562-e71e-40a8-8cfc-7f9564f635db" (UID: "a9e9b562-e71e-40a8-8cfc-7f9564f635db"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 25 22:44:30 crc kubenswrapper[4910]: I1125 22:44:30.527525 4910 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/a9e9b562-e71e-40a8-8cfc-7f9564f635db-must-gather-output\") on node \"crc\" DevicePath \"\""
Nov 25 22:44:31 crc kubenswrapper[4910]: I1125 22:44:31.029098 4910 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-4srbz_must-gather-5zv5t_a9e9b562-e71e-40a8-8cfc-7f9564f635db/copy/0.log"
Nov 25 22:44:31 crc kubenswrapper[4910]: I1125 22:44:31.029719 4910 scope.go:117] "RemoveContainer" containerID="365d183e3c7f847fe0399b0fcaf3687761b1da201282feb3e6a3feba7e85a2bb"
Nov 25 22:44:31 crc kubenswrapper[4910]: I1125 22:44:31.029751 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-4srbz/must-gather-5zv5t"
Nov 25 22:44:31 crc kubenswrapper[4910]: I1125 22:44:31.059106 4910 scope.go:117] "RemoveContainer" containerID="fe6b8e2cfb5993a4eca343957b8154e77df17a01cb6a0d32a7df22810b2bb53a"
Nov 25 22:44:31 crc kubenswrapper[4910]: I1125 22:44:31.216482 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9e9b562-e71e-40a8-8cfc-7f9564f635db" path="/var/lib/kubelet/pods/a9e9b562-e71e-40a8-8cfc-7f9564f635db/volumes"
Nov 25 22:44:34 crc kubenswrapper[4910]: I1125 22:44:34.204463 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:44:34 crc kubenswrapper[4910]: E1125 22:44:34.205312 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:44:49 crc kubenswrapper[4910]: I1125 22:44:49.206941 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:44:49 crc kubenswrapper[4910]: E1125 22:44:49.208095 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.190280 4910 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"]
Nov 25 22:45:00 crc kubenswrapper[4910]: E1125 22:45:00.191479 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" containerName="extract-utilities"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.191501 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" containerName="extract-utilities"
Nov 25 22:45:00 crc kubenswrapper[4910]: E1125 22:45:00.191529 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e8273d4-adfb-4759-8332-3e804e2f526c" containerName="extract-utilities"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.191537 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e8273d4-adfb-4759-8332-3e804e2f526c" containerName="extract-utilities"
Nov 25 22:45:00 crc kubenswrapper[4910]: E1125 22:45:00.191553 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9e9b562-e71e-40a8-8cfc-7f9564f635db" containerName="gather"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.191562 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9e9b562-e71e-40a8-8cfc-7f9564f635db" containerName="gather"
Nov 25 22:45:00 crc kubenswrapper[4910]: E1125 22:45:00.191574 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9e9b562-e71e-40a8-8cfc-7f9564f635db" containerName="copy"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.191581 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9e9b562-e71e-40a8-8cfc-7f9564f635db" containerName="copy"
Nov 25 22:45:00 crc kubenswrapper[4910]: E1125 22:45:00.191603 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" containerName="registry-server"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.191610 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" containerName="registry-server"
Nov 25 22:45:00 crc kubenswrapper[4910]: E1125 22:45:00.191627 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e8273d4-adfb-4759-8332-3e804e2f526c" containerName="registry-server"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.191634 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e8273d4-adfb-4759-8332-3e804e2f526c" containerName="registry-server"
Nov 25 22:45:00 crc kubenswrapper[4910]: E1125 22:45:00.191648 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" containerName="extract-content"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.191655 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" containerName="extract-content"
Nov 25 22:45:00 crc kubenswrapper[4910]: E1125 22:45:00.191686 4910 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5e8273d4-adfb-4759-8332-3e804e2f526c" containerName="extract-content"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.191695 4910 state_mem.go:107] "Deleted CPUSet assignment" podUID="5e8273d4-adfb-4759-8332-3e804e2f526c" containerName="extract-content"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.192074 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9e9b562-e71e-40a8-8cfc-7f9564f635db" containerName="gather"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.192100 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="5e8273d4-adfb-4759-8332-3e804e2f526c" containerName="registry-server"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.192115 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="77a96243-e1e2-4a0e-a695-e5dece3a6f4b" containerName="registry-server"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.192169 4910 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9e9b562-e71e-40a8-8cfc-7f9564f635db" containerName="copy"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.193233 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.196784 4910 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.197163 4910 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.206292 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"]
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.329427 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7b4041e5-e468-4485-a2c3-fd00e0ae291f-config-volume\") pod \"collect-profiles-29401845-j96ls\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.329509 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7b4041e5-e468-4485-a2c3-fd00e0ae291f-secret-volume\") pod \"collect-profiles-29401845-j96ls\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.329560 4910 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m48sv\" (UniqueName: \"kubernetes.io/projected/7b4041e5-e468-4485-a2c3-fd00e0ae291f-kube-api-access-m48sv\") pod \"collect-profiles-29401845-j96ls\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.431522 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7b4041e5-e468-4485-a2c3-fd00e0ae291f-secret-volume\") pod \"collect-profiles-29401845-j96ls\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.431626 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m48sv\" (UniqueName: \"kubernetes.io/projected/7b4041e5-e468-4485-a2c3-fd00e0ae291f-kube-api-access-m48sv\") pod \"collect-profiles-29401845-j96ls\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.431909 4910 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7b4041e5-e468-4485-a2c3-fd00e0ae291f-config-volume\") pod \"collect-profiles-29401845-j96ls\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.433748 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7b4041e5-e468-4485-a2c3-fd00e0ae291f-config-volume\") pod \"collect-profiles-29401845-j96ls\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.444553 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7b4041e5-e468-4485-a2c3-fd00e0ae291f-secret-volume\") pod \"collect-profiles-29401845-j96ls\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.452969 4910 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m48sv\" (UniqueName: \"kubernetes.io/projected/7b4041e5-e468-4485-a2c3-fd00e0ae291f-kube-api-access-m48sv\") pod \"collect-profiles-29401845-j96ls\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:00 crc kubenswrapper[4910]: I1125 22:45:00.522792 4910 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:01 crc kubenswrapper[4910]: I1125 22:45:01.012371 4910 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"]
Nov 25 22:45:01 crc kubenswrapper[4910]: I1125 22:45:01.205281 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:45:01 crc kubenswrapper[4910]: E1125 22:45:01.205874 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:45:01 crc kubenswrapper[4910]: I1125 22:45:01.385832 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls" event={"ID":"7b4041e5-e468-4485-a2c3-fd00e0ae291f","Type":"ContainerStarted","Data":"029667026e97be73fd81dcf76a18b894791b511d5e9dc13d5584d556162b3b0b"}
Nov 25 22:45:01 crc kubenswrapper[4910]: I1125 22:45:01.386213 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls" event={"ID":"7b4041e5-e468-4485-a2c3-fd00e0ae291f","Type":"ContainerStarted","Data":"d01337c8bc222b0878e372b4ec0afb6e68e72f294c7f5e8753bce212d512a1d1"}
Nov 25 22:45:02 crc kubenswrapper[4910]: I1125 22:45:02.398725 4910 generic.go:334] "Generic (PLEG): container finished" podID="7b4041e5-e468-4485-a2c3-fd00e0ae291f" containerID="029667026e97be73fd81dcf76a18b894791b511d5e9dc13d5584d556162b3b0b" exitCode=0
Nov 25 22:45:02 crc kubenswrapper[4910]: I1125 22:45:02.398789 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls" event={"ID":"7b4041e5-e468-4485-a2c3-fd00e0ae291f","Type":"ContainerDied","Data":"029667026e97be73fd81dcf76a18b894791b511d5e9dc13d5584d556162b3b0b"}
Nov 25 22:45:03 crc kubenswrapper[4910]: I1125 22:45:03.800361 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:03 crc kubenswrapper[4910]: I1125 22:45:03.916920 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7b4041e5-e468-4485-a2c3-fd00e0ae291f-secret-volume\") pod \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") "
Nov 25 22:45:03 crc kubenswrapper[4910]: I1125 22:45:03.917094 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m48sv\" (UniqueName: \"kubernetes.io/projected/7b4041e5-e468-4485-a2c3-fd00e0ae291f-kube-api-access-m48sv\") pod \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") "
Nov 25 22:45:03 crc kubenswrapper[4910]: I1125 22:45:03.918919 4910 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7b4041e5-e468-4485-a2c3-fd00e0ae291f-config-volume\") pod \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\" (UID: \"7b4041e5-e468-4485-a2c3-fd00e0ae291f\") "
Nov 25 22:45:03 crc kubenswrapper[4910]: I1125 22:45:03.919900 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b4041e5-e468-4485-a2c3-fd00e0ae291f-config-volume" (OuterVolumeSpecName: "config-volume") pod "7b4041e5-e468-4485-a2c3-fd00e0ae291f" (UID: "7b4041e5-e468-4485-a2c3-fd00e0ae291f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 25 22:45:03 crc kubenswrapper[4910]: I1125 22:45:03.924911 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b4041e5-e468-4485-a2c3-fd00e0ae291f-kube-api-access-m48sv" (OuterVolumeSpecName: "kube-api-access-m48sv") pod "7b4041e5-e468-4485-a2c3-fd00e0ae291f" (UID: "7b4041e5-e468-4485-a2c3-fd00e0ae291f"). InnerVolumeSpecName "kube-api-access-m48sv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 25 22:45:03 crc kubenswrapper[4910]: I1125 22:45:03.925632 4910 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b4041e5-e468-4485-a2c3-fd00e0ae291f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "7b4041e5-e468-4485-a2c3-fd00e0ae291f" (UID: "7b4041e5-e468-4485-a2c3-fd00e0ae291f"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 25 22:45:04 crc kubenswrapper[4910]: I1125 22:45:04.022406 4910 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/7b4041e5-e468-4485-a2c3-fd00e0ae291f-secret-volume\") on node \"crc\" DevicePath \"\""
Nov 25 22:45:04 crc kubenswrapper[4910]: I1125 22:45:04.022454 4910 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m48sv\" (UniqueName: \"kubernetes.io/projected/7b4041e5-e468-4485-a2c3-fd00e0ae291f-kube-api-access-m48sv\") on node \"crc\" DevicePath \"\""
Nov 25 22:45:04 crc kubenswrapper[4910]: I1125 22:45:04.022464 4910 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/7b4041e5-e468-4485-a2c3-fd00e0ae291f-config-volume\") on node \"crc\" DevicePath \"\""
Nov 25 22:45:04 crc kubenswrapper[4910]: I1125 22:45:04.440074 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls" event={"ID":"7b4041e5-e468-4485-a2c3-fd00e0ae291f","Type":"ContainerDied","Data":"d01337c8bc222b0878e372b4ec0afb6e68e72f294c7f5e8753bce212d512a1d1"}
Nov 25 22:45:04 crc kubenswrapper[4910]: I1125 22:45:04.440125 4910 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d01337c8bc222b0878e372b4ec0afb6e68e72f294c7f5e8753bce212d512a1d1"
Nov 25 22:45:04 crc kubenswrapper[4910]: I1125 22:45:04.440181 4910 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401845-j96ls"
Nov 25 22:45:04 crc kubenswrapper[4910]: I1125 22:45:04.507368 4910 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j"]
Nov 25 22:45:04 crc kubenswrapper[4910]: I1125 22:45:04.520050 4910 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401800-2765j"]
Nov 25 22:45:05 crc kubenswrapper[4910]: I1125 22:45:05.223138 4910 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a27d007-69bd-497f-acf6-c66e0d55ef38" path="/var/lib/kubelet/pods/2a27d007-69bd-497f-acf6-c66e0d55ef38/volumes"
Nov 25 22:45:13 crc kubenswrapper[4910]: I1125 22:45:13.345035 4910 scope.go:117] "RemoveContainer" containerID="4887eae438c09d44692bb940069f937a1b6ff0f004d524b0632d25210d944b31"
Nov 25 22:45:13 crc kubenswrapper[4910]: I1125 22:45:13.371605 4910 scope.go:117] "RemoveContainer" containerID="2943ac850f8c2f8d612f8fdc5aa9c2e67838b22d94491265a63323472afef99e"
Nov 25 22:45:14 crc kubenswrapper[4910]: I1125 22:45:14.204835 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:45:14 crc kubenswrapper[4910]: E1125 22:45:14.205551 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:45:27 crc kubenswrapper[4910]: I1125 22:45:27.205582 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:45:27 crc kubenswrapper[4910]: E1125 22:45:27.208176 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:45:39 crc kubenswrapper[4910]: I1125 22:45:39.204873 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:45:39 crc kubenswrapper[4910]: E1125 22:45:39.205805 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:45:52 crc kubenswrapper[4910]: I1125 22:45:52.204705 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:45:52 crc kubenswrapper[4910]: E1125 22:45:52.205772 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:46:06 crc kubenswrapper[4910]: I1125 22:46:06.205024 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:46:06 crc kubenswrapper[4910]: E1125 22:46:06.207002 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:46:13 crc kubenswrapper[4910]: I1125 22:46:13.514608 4910 scope.go:117] "RemoveContainer" containerID="5a902b4cab2998513adcecaa23d3d7d13f2061d7e63332f967f63757849c9846"
Nov 25 22:46:17 crc kubenswrapper[4910]: I1125 22:46:17.204971 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:46:17 crc kubenswrapper[4910]: E1125 22:46:17.206544 4910 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-g8f4t_openshift-machine-config-operator(89c4a6ab-992c-467f-92fe-1111582e1b49)\"" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" podUID="89c4a6ab-992c-467f-92fe-1111582e1b49"
Nov 25 22:46:28 crc kubenswrapper[4910]: I1125 22:46:28.204556 4910 scope.go:117] "RemoveContainer" containerID="08ca30eaead683fba4d7058cd2ac61fe789c3a0ebc86bf83f4f164145016f1fc"
Nov 25 22:46:29 crc kubenswrapper[4910]: I1125 22:46:29.467457 4910 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-g8f4t" event={"ID":"89c4a6ab-992c-467f-92fe-1111582e1b49","Type":"ContainerStarted","Data":"7e1d118ae7bd0d5ed9f2c5cc8380b3054063cec592decbb679885493443511d0"}